Fix a tiny typo & a tiny bug
sadra-barikbin committed May 7, 2024
1 parent 981e10a commit 51c06d6
Showing 2 changed files with 5 additions and 5 deletions.
src/lighteval/logging/evaluation_tracker.py (2 changes: 1 addition & 1 deletion)
@@ -511,7 +511,7 @@ def push_results_to_tensorboard( # noqa: C901
         self, results: dict[str, dict[str, float]], details: dict[str, DetailsLogger.CompiledDetail]
     ):
         if not is_nanotron_available():
-            hlog_warn("You cannot push results to tensorboard with having nanotron installed. Skipping")
+            hlog_warn("You cannot push results to tensorboard without having nanotron installed. Skipping")
             return
         config: Config = self.general_config_logger.config
         lighteval_config = config.lighteval
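
The first change is a one-word fix to the warning that fires when the optional nanotron dependency is missing. For context, a minimal self-contained sketch of that guard pattern, assuming an importlib-based availability check; lighteval's actual is_nanotron_available and hlog_warn helpers may be implemented differently:

import importlib.util
import logging

logger = logging.getLogger(__name__)

def is_nanotron_available() -> bool:
    # Detect the optional dependency without importing it.
    return importlib.util.find_spec("nanotron") is not None

def push_results_to_tensorboard(results: dict) -> None:
    # Warn and bail out early rather than crashing on a missing import.
    if not is_nanotron_available():
        logger.warning("You cannot push results to tensorboard without having nanotron installed. Skipping")
        return
    # ... the real method builds and writes tensorboard summaries here ...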
src/lighteval/models/model_config.py (8 changes: 4 additions & 4 deletions)
@@ -85,9 +85,9 @@ class BaseModelConfig:
             If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and
             `False` for causal models.
         model_parallel (bool, optional, defaults to False):
-            True/False: force to uses or not the `accelerate` library to load a large
+            True/False: force to use or not the `accelerate` library to load a large
             model across multiple devices.
-            Default: None which correspond to comparing the number of process with
+            Default: None which corresponds to comparing the number of processes with
             the number of GPUs. If it's smaller => model-parallelism, else not.
         dtype (Union[str, torch.dtype], optional, defaults to None):
             Converts the model weights to `dtype`, if specified. Strings get
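
The corrected docstring pins down the model_parallel heuristic: when the flag is left as None, model parallelism is enabled only if there are fewer processes than GPUs. A minimal sketch of that decision, assuming the process count is supplied by the caller (in practice accelerate would provide it) and torch reports the GPU count:

import torch

def resolve_model_parallel(model_parallel: bool | None, num_processes: int) -> bool:
    # An explicit True/False from the config wins outright.
    if model_parallel is not None:
        return model_parallel
    # Default None: fewer processes than GPUs means each process
    # can shard the model across several devices.
    return num_processes < torch.cuda.device_count()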
@@ -279,8 +279,8 @@ def create_model_config(args: Namespace, accelerator: Union["Accelerator", None]

     if config["type"] == "tgi":
         return TGIModelConfig(
-            inference_server_address=args["instance"]["inference_server_address"],
-            inference_server_auth=args["instance"]["inference_server_auth"],
+            inference_server_address=config["instance"]["inference_server_address"],
+            inference_server_auth=config["instance"]["inference_server_auth"],
         )

     if config["type"] == "endpoint":
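
The second change is the actual bug: create_model_config receives args as an argparse Namespace, which is not subscriptable, so args["instance"] would raise a TypeError at runtime; the nested TGI settings live in the parsed config dict. A small sketch of the failure mode, with TGIModelConfig stubbed as an assumed dataclass mirroring the fields in the diff:

from argparse import Namespace
from dataclasses import dataclass

@dataclass
class TGIModelConfig:  # stub; field names taken from the diff above
    inference_server_address: str
    inference_server_auth: str | None

args = Namespace(model_config_path="tgi.yaml")
config = {
    "type": "tgi",
    "instance": {
        "inference_server_address": "http://localhost:8080",
        "inference_server_auth": None,
    },
}

# Before the fix: args["instance"] -> TypeError: 'Namespace' object is not subscriptable.
# After the fix, the values are read from the parsed config dict instead:
model_config = TGIModelConfig(
    inference_server_address=config["instance"]["inference_server_address"],
    inference_server_auth=config["instance"]["inference_server_auth"],
)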
