From 3a808336e19ea8785aabdb259476634eb2e8f33c Mon Sep 17 00:00:00 2001 From: Nathan Habib <30601243+NathanHB@users.noreply.github.com> Date: Thu, 4 Jul 2024 12:48:24 +0200 Subject: [PATCH 1/3] fix llm as judge warnings (#173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * commit * fixes * fix style * fixes * make style * Fix import error detection for open ai package (llm as a judge metric) --------- Co-authored-by: Nathan Habib Co-authored-by: Clémentine Fourrier <22726840+clefourrier@users.noreply.github.com> --- .../mt_bench => metrics}/judge_prompts.jsonl | 0 src/lighteval/metrics/llm_as_judge.py | 14 ++++++++++--- src/lighteval/metrics/metrics.py | 10 ++++++---- src/lighteval/metrics/metrics_sample.py | 20 ++++++++----------- src/lighteval/tasks/extended/mt_bench/main.py | 2 +- src/lighteval/tasks/lighteval_task.py | 16 ++++++++++++++- src/lighteval/utils.py | 7 +++++++ 7 files changed, 48 insertions(+), 21 deletions(-) rename src/lighteval/{tasks/extended/mt_bench => metrics}/judge_prompts.jsonl (100%) diff --git a/src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl b/src/lighteval/metrics/judge_prompts.jsonl similarity index 100% rename from src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl rename to src/lighteval/metrics/judge_prompts.jsonl diff --git a/src/lighteval/metrics/llm_as_judge.py b/src/lighteval/metrics/llm_as_judge.py index 12b637a3e..5b70e9d5e 100644 --- a/src/lighteval/metrics/llm_as_judge.py +++ b/src/lighteval/metrics/llm_as_judge.py @@ -27,9 +27,8 @@ import time from typing import Optional -from openai import OpenAI - from lighteval.logging.hierarchical_logger import hlog_warn +from lighteval.utils import NO_OPENAI_ERROR_MSG, is_openai_available class JudgeOpenAI: @@ -70,7 +69,8 @@ def __init__( openai_api_key: str, multi_turn: bool = False, ): - self.client = OpenAI(api_key=openai_api_key) + self.client = None # loaded lazily + self.openai_api_key = openai_api_key self.model = model self.seed = seed self.temperature = temperature @@ -112,6 +112,14 @@ def evaluate_answer( Raises: Exception: If an error occurs during the API call. """ + if self.client is None: + if not is_openai_available(): + raise ImportError(NO_OPENAI_ERROR_MSG) + + from openai import OpenAI + + self.client = OpenAI(api_key=self.openai_api_key) + prompts = [ self.__get_prompts_single_turn( questions[0], answers[0], references[0] if references is not None and len(references) > 0 else None diff --git a/src/lighteval/metrics/metrics.py b/src/lighteval/metrics/metrics.py index f7eaedba1..f970e8504 100644 --- a/src/lighteval/metrics/metrics.py +++ b/src/lighteval/metrics/metrics.py @@ -20,6 +20,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+import os + import numpy as np from aenum import Enum @@ -225,14 +227,14 @@ class Metrics(Enum): corpus_level_fn=np.mean, higher_is_better=True, ) - llm_judge_multi_turn = SampleLevelMetricGrouping( + llm_judge_multi_turn_openai = SampleLevelMetricGrouping( metric=["single_turn", "multi_turn"], higher_is_better=True, category=MetricCategory.LLM_AS_JUDGE_MULTI_TURN, use_case=MetricUseCase.SUMMARIZATION, sample_level_fn=JudgeLLM( judge_model_name="gpt-3.5-turbo", - template_path="src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl", + template_path=os.path.join(os.path.dirname(__file__), "judge_prompts.jsonl"), multi_turn=True, ).compute, corpus_level_fn={ @@ -240,14 +242,14 @@ class Metrics(Enum): "multi_turn": np.mean, }, ) - llm_judge = SampleLevelMetricGrouping( + llm_judge_openai = SampleLevelMetricGrouping( metric=["judge_score"], higher_is_better=True, category=MetricCategory.LLM_AS_JUDGE, use_case=MetricUseCase.SUMMARIZATION, sample_level_fn=JudgeLLM( judge_model_name="gpt-3.5-turbo", - template_path="src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl", + template_path=os.path.join(os.path.dirname(__file__), "", "judge_prompts.jsonl"), multi_turn=False, ).compute, corpus_level_fn={ diff --git a/src/lighteval/metrics/metrics_sample.py b/src/lighteval/metrics/metrics_sample.py index 6210f13ed..1a52d6fdd 100644 --- a/src/lighteval/metrics/metrics_sample.py +++ b/src/lighteval/metrics/metrics_sample.py @@ -631,18 +631,14 @@ def __init__(self, judge_model_name: str, template_path: str, multi_turn: bool = OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") self.multi_turn = multi_turn - try: - self.judge = JudgeOpenAI( - model=judge_model_name, - seed=42, - temperature=0.0, - templates_path=template_path, - openai_api_key=OPENAI_API_KEY, - multi_turn=multi_turn, - ) - except Exception as e: - print(f"Could not initialize the JudgeOpenAI model:\n{e}") - self.judge = None + self.judge = JudgeOpenAI( + model=judge_model_name, + seed=42, + temperature=0.0, + templates_path=template_path, + openai_api_key=OPENAI_API_KEY, + multi_turn=multi_turn, + ) def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]: """ diff --git a/src/lighteval/tasks/extended/mt_bench/main.py b/src/lighteval/tasks/extended/mt_bench/main.py index ec8347b72..a0ce741ca 100644 --- a/src/lighteval/tasks/extended/mt_bench/main.py +++ b/src/lighteval/tasks/extended/mt_bench/main.py @@ -45,7 +45,7 @@ evaluation_splits=["train"], few_shots_split="", few_shots_select="random", - metric=["llm_judge_multi_turn"], + metric=["llm_judge_multi_turn_openai"], generation_size=1024, stop_sequence=[], ) diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index f5c7a1f9e..85f4e0256 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -21,6 +21,7 @@ # SOFTWARE. import collections +import os import random from dataclasses import dataclass from multiprocessing import Pool @@ -53,7 +54,7 @@ RequestType, TaskExampleId, ) -from lighteval.utils import as_list +from lighteval.utils import NO_OPENAI_ERROR_MSG, as_list, is_openai_available from . 
import tasks_prompt_formatting @@ -200,8 +201,21 @@ def __init__( # noqa: C901 self.metrics = as_list(cfg.metric) self.suite = as_list(cfg.suite) ignored = [metric for metric in self.metrics if Metrics[metric].value.category == MetricCategory.IGNORED] + if len(ignored) > 0: hlog_warn(f"[WARNING] Not implemented yet: ignoring the metric {' ,'.join(ignored)} for task {self.name}.") + + if any( + Metrics[metric].value.category in [MetricCategory.LLM_AS_JUDGE, MetricCategory.LLM_AS_JUDGE_MULTI_TURN] + for metric in self.metrics + ): + if not is_openai_available(): + raise ImportError(NO_OPENAI_ERROR_MSG) + if os.getenv("OPENAI_API_KEY") is None: + raise ValueError( + "Using llm as judge metric but no OPEN_API_KEY were found, please set it with: export OPEN_API_KEY={yourkey}" + ) + current_categories = [Metrics[metric].value.category for metric in self.metrics] self.has_metric_category = {category: (category in current_categories) for category in MetricCategory} # Sub-optimal system - we might want to store metric parametrisation in a yaml conf for example diff --git a/src/lighteval/utils.py b/src/lighteval/utils.py index 3380fc9a5..768a1cd88 100644 --- a/src/lighteval/utils.py +++ b/src/lighteval/utils.py @@ -191,6 +191,13 @@ def is_peft_available() -> bool: NO_PEFT_ERROR_MSG = "You are trying to use adapter weights models, for which you need `peft`, which is not available in your environment. Please install it using pip." +def is_openai_available() -> bool: + return importlib.util.find_spec("openai") is not None + + +NO_OPENAI_ERROR_MSG = "You are trying to use an Open AI LLM as a judge, for which you need `openai`, which is not available in your environment. Please install it using pip." + + def can_load_extended_tasks() -> bool: imports = [] for package in ["langdetect"]: From 0bceaee026bb029cf3ea14ff5a1dc032abcd5543 Mon Sep 17 00:00:00 2001 From: Philipp Schmid <32632186+philschmid@users.noreply.github.com> Date: Thu, 4 Jul 2024 16:38:51 +0200 Subject: [PATCH 2/3] ADD GPT-4 as Judge (#206) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ADD GPT-4 as Judge * Fix style --------- Co-authored-by: Clémentine Fourrier <22726840+clefourrier@users.noreply.github.com> --- src/lighteval/metrics/metrics_sample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lighteval/metrics/metrics_sample.py b/src/lighteval/metrics/metrics_sample.py index 1a52d6fdd..ef3798e48 100644 --- a/src/lighteval/metrics/metrics_sample.py +++ b/src/lighteval/metrics/metrics_sample.py @@ -622,7 +622,7 @@ def edit_similarity(self, s1, s2): class JudgeLLM: - available_models = ["gpt-3.5-turbo"] + available_models = ["gpt-3.5-turbo", "gpt-4o", "gpt-4-turbo", "gpt-4"] def __init__(self, judge_model_name: str, template_path: str, multi_turn: bool = False): if judge_model_name not in self.available_models: From 843a0f8c7cb9de20f0188c86a06a0eb429c36974 Mon Sep 17 00:00:00 2001 From: Sadra Barikbin Date: Fri, 5 Jul 2024 10:27:37 +0330 Subject: [PATCH 3/3] Fix a few typos and do a tiny refactor (#187) --- run_evals_accelerate.py | 11 ++++++++--- src/lighteval/evaluator.py | 2 +- src/lighteval/logging/evaluation_tracker.py | 2 +- src/lighteval/metrics/imports/bert_scorer.py | 2 +- src/lighteval/metrics/judge_prompts.jsonl | 4 ++-- src/lighteval/metrics/metrics.py | 2 +- src/lighteval/metrics/metrics_sample.py | 2 +- src/lighteval/models/base_model.py | 8 ++++---- src/lighteval/models/model_config.py | 11 ++++------- src/lighteval/models/model_loader.py | 4 ++-- 
src/lighteval/models/nanotron_model.py | 4 ++-- src/lighteval/tasks/lighteval_task.py | 4 ++-- src/lighteval/tasks/registry.py | 6 +++--- src/lighteval/tasks/requests.py | 6 ++---- 14 files changed, 34 insertions(+), 34 deletions(-) diff --git a/run_evals_accelerate.py b/run_evals_accelerate.py index 23e46cb05..20b6ec9f1 100644 --- a/run_evals_accelerate.py +++ b/run_evals_accelerate.py @@ -51,7 +51,12 @@ def get_parser(): parser.add_argument( "--public_run", default=False, action="store_true", help="Push results and details to a public repo" ) - parser.add_argument("--cache_dir", type=str, default=CACHE_DIR) + parser.add_argument( + "--cache_dir", + type=str, + default=CACHE_DIR, + help="Cache directory for downloaded datasets & model, defaults to `HF_HOME` environment variable", + ) parser.add_argument( "--results_org", type=str, @@ -65,13 +70,13 @@ def get_parser(): "--custom_tasks", type=str, default=None, - help="Path to a file with custom tasks (a TASK list of dict and potentially prompt formating functions)", + help="Path to a file with custom tasks (a TASK list of dict and potentially prompt formatting functions)", ) group.add_argument( "--tasks", type=str, default=None, - help="Id of a task, e.g. 'original|mmlu:abstract_algebra|5|0' or path to a texte file with a list of tasks", + help="Comma-separated ids of tasks, e.g. 'original|mmlu:abstract_algebra|5' or path to a text file with a list of tasks", ) parser.add_argument("--num_fewshot_seeds", type=int, default=1, help="Number of trials the few shots") return parser diff --git a/src/lighteval/evaluator.py b/src/lighteval/evaluator.py index e837b9225..883e5ef70 100644 --- a/src/lighteval/evaluator.py +++ b/src/lighteval/evaluator.py @@ -67,7 +67,7 @@ def evaluate( # noqa: C901 # A request output tupe is a Tuple where the first element is the index of # the request for one document of one task i.e. # task: "arc_easy", doc: "0"# request: "0" -> request_index = 0, - # We can have multiple request per doc for multi choice tasks for example. + # We can have multiple requests per doc for multi choice tasks for example. # all responses for each (task, doc) RequestIndexModelResponseTuple = collections.namedtuple( diff --git a/src/lighteval/logging/evaluation_tracker.py b/src/lighteval/logging/evaluation_tracker.py index 35a835bc1..f4bdf9566 100644 --- a/src/lighteval/logging/evaluation_tracker.py +++ b/src/lighteval/logging/evaluation_tracker.py @@ -511,7 +511,7 @@ def push_results_to_tensorboard( # noqa: C901 self, results: dict[str, dict[str, float]], details: dict[str, DetailsLogger.CompiledDetail] ): if not is_nanotron_available(): - hlog_warn("You cannot push results to tensorboard with having nanotron installed. Skipping") + hlog_warn("You cannot push results to tensorboard without having nanotron installed. Skipping") return config: Config = self.general_config_logger.config lighteval_config = config.lighteval diff --git a/src/lighteval/metrics/imports/bert_scorer.py b/src/lighteval/metrics/imports/bert_scorer.py index 442ee9c75..a5226e48d 100644 --- a/src/lighteval/metrics/imports/bert_scorer.py +++ b/src/lighteval/metrics/imports/bert_scorer.py @@ -163,7 +163,7 @@ def greedy_cos_idf( - :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for reference sentences. 
- :param: `ref_idf` (torch.Tensor): BxK, idf score of each word - piece in the reference setence + piece in the reference sentence - :param: `hyp_embedding` (torch.Tensor): embeddings of candidate sentences, BxKxd, B: batch size, K: longest length, d: bert dimenison diff --git a/src/lighteval/metrics/judge_prompts.jsonl b/src/lighteval/metrics/judge_prompts.jsonl index 4ec7524cb..a43ef34c1 100644 --- a/src/lighteval/metrics/judge_prompts.jsonl +++ b/src/lighteval/metrics/judge_prompts.jsonl @@ -4,5 +4,5 @@ {"name": "pair-math-v1-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. Your evaluation should consider correctness and helpfulness. You will be given reference answers, the assistant A's answers, the assistant B's answers. Your job is to determine which assistant provides correct and helpful answers to the second user question. Begin your evaluation by comparing both assistants' answers with the reference answers. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"} {"name": "single-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"} {"name": "single-math-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. 
Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"} -{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. You evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"} -{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. You evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"} +{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Your evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. 
Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"} +{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Your evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"} diff --git a/src/lighteval/metrics/metrics.py b/src/lighteval/metrics/metrics.py index f970e8504..262b20a09 100644 --- a/src/lighteval/metrics/metrics.py +++ b/src/lighteval/metrics/metrics.py @@ -249,7 +249,7 @@ class Metrics(Enum): use_case=MetricUseCase.SUMMARIZATION, sample_level_fn=JudgeLLM( judge_model_name="gpt-3.5-turbo", - template_path=os.path.join(os.path.dirname(__file__), "", "judge_prompts.jsonl"), + template_path=os.path.join(os.path.dirname(__file__), "judge_prompts.jsonl"), multi_turn=False, ).compute, corpus_level_fn={ diff --git a/src/lighteval/metrics/metrics_sample.py b/src/lighteval/metrics/metrics_sample.py index ef3798e48..b7876dbc3 100644 --- a/src/lighteval/metrics/metrics_sample.py +++ b/src/lighteval/metrics/metrics_sample.py @@ -644,7 +644,7 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[ """ Compute the score of a generative task using a llm as a judge. The generative task can be multiturn with 2 turns max, in that case, we - return scores for turn 1 and 2. Also returns user_prompt and judgment + return scores for turn 1 and 2. Also returns user_prompt and judgement which are ignored later by the aggregator. 
""" diff --git a/src/lighteval/models/base_model.py b/src/lighteval/models/base_model.py index 3913fd80b..df7b3e92c 100644 --- a/src/lighteval/models/base_model.py +++ b/src/lighteval/models/base_model.py @@ -79,7 +79,7 @@ def __init__( self._add_special_tokens = config.add_special_tokens if config.add_special_tokens is not None else False self._tokenizer = self._create_auto_tokenizer(config, env_config) - # If model_parallel is not set we compare the number of process with the number of GPUs + # If model_parallel is not set we compare the number of processes with the number of GPUs self.model = self._create_auto_model(config, env_config) self.model.eval() torch.set_grad_enabled(False) @@ -819,7 +819,7 @@ def _loglikelihood_tokens( ) res.append(answer) - # Clean up GPUS + # Clean up GPUs del model_output del logits del batched_inputs @@ -852,7 +852,7 @@ def prepare_batch_logprob( hlog_warn("max_context is None, using max_length") max_context = self.max_length - # Each sample is concatenated and cut to lenght or padded to max_length + # Each sample is concatenated and cut to length or padded to max_length for orig_tokens in inputs: truncated.append(max(len(orig_tokens) - max_context, 0)) @@ -1030,7 +1030,7 @@ def _loglikelihood_single_token( ) res.append(answer) - # Clean up GPUS + # Clean up GPUs del out del batch_probs del batched_inputs diff --git a/src/lighteval/models/model_config.py b/src/lighteval/models/model_config.py index f2736e1af..b686c9bd7 100644 --- a/src/lighteval/models/model_config.py +++ b/src/lighteval/models/model_config.py @@ -85,9 +85,9 @@ class BaseModelConfig: If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and `False` for causal models. model_parallel (bool, optional, defaults to False): - True/False: force to uses or not the `accelerate` library to load a large + True/False: force to use or not the `accelerate` library to load a large model across multiple devices. - Default: None which correspond to comparing the number of process with + Default: None which corresponds to comparing the number of processes with the number of GPUs. If it's smaller => model-parallelism, else not. dtype (Union[str, torch.dtype], optional, defaults to None):): Converts the model weights to `dtype`, if specified. Strings get @@ -277,11 +277,8 @@ def create_model_config(args: Namespace, accelerator: Union["Accelerator", None] return BaseModelConfig(**args_dict) - if hasattr(args, "model_config") and args.model_config: - config = args.model_config["model"] - else: - with open(args.model_config_path, "r") as f: - config = yaml.safe_load(f)["model"] + with open(args.model_config_path, "r") as f: + config = yaml.safe_load(f)["model"] if config["type"] == "tgi": return TGIModelConfig( diff --git a/src/lighteval/models/model_loader.py b/src/lighteval/models/model_loader.py index dd55b4241..e662beac0 100644 --- a/src/lighteval/models/model_loader.py +++ b/src/lighteval/models/model_loader.py @@ -57,8 +57,8 @@ def load_model( # noqa: C901 config: Union[BaseModelConfig, AdapterModelConfig, DeltaModelConfig, TGIModelConfig, InferenceEndpointModelConfig], env_config: EnvConfig, ) -> Tuple[Union[BaseModel, AdapterModel, DeltaModel, ModelClient], ModelInfo]: - """Will load either a model from an inference server or a model from a checkpoint. depending - on the arguments passed to the program. + """Will load either a model from an inference server or a model from a checkpoint, depending + on the config type. 
Args: args (Namespace): arguments passed to the program diff --git a/src/lighteval/models/nanotron_model.py b/src/lighteval/models/nanotron_model.py index 977b2b198..efe207091 100644 --- a/src/lighteval/models/nanotron_model.py +++ b/src/lighteval/models/nanotron_model.py @@ -846,7 +846,7 @@ def _loglikelihood_single_token( tq.desc = f"loglikelihood_single_token Subset {s} Node {dist.get_rank(self.parallel_context.world_pg)} - {human_format(tokens_per_sec)} tokens/s" - # Clean up GPUS + # Clean up GPUs del out del batch_probs del batched_inputs @@ -1083,7 +1083,7 @@ def _loglikelihood_tokens( tokens_per_sec = batched_inputs.numel() / (elapsed_time_per_iteration_ms / 1000) tq.desc = f"loglikelihood Subset {s} Node {dist.get_rank(self.parallel_context.world_pg)} - {human_format(tokens_per_sec)} tokens/s" - # Clean up GPUS + # Clean up GPUs del out del logits del batched_inputs diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index 85f4e0256..33934caa0 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -26,7 +26,7 @@ from dataclasses import dataclass from multiprocessing import Pool from pathlib import Path -from typing import TYPE_CHECKING, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union from datasets import load_dataset @@ -454,7 +454,7 @@ def get_request_type(self) -> list[RequestType]: # noqa C901 def construct_requests( self, formatted_doc: Doc, context: str, document_id_seed: str, current_task_name: str - ) -> List[Request]: + ) -> Dict[RequestType, List[Request]]: """ Constructs a list of requests from the task based on the given parameters. diff --git a/src/lighteval/tasks/registry.py b/src/lighteval/tasks/registry.py index abaa17451..df5e4da6a 100644 --- a/src/lighteval/tasks/registry.py +++ b/src/lighteval/tasks/registry.py @@ -117,7 +117,7 @@ def get_task_dict( Args: task_name_list (List[str]): A list of task names. - custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module it-self + custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module itself extended_tasks (Optional[str]): The path to the extended tasks group of submodules Returns: @@ -159,7 +159,7 @@ def create_custom_tasks_module(custom_tasks: Union[str, ModuleType]) -> ModuleTy """Creates a custom task module to load tasks defined by the user in their own file. Args: - custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module it-self + custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module itself Returns: ModuleType: The newly imported/created custom tasks modules @@ -178,7 +178,7 @@ def get_custom_tasks(custom_tasks: Union[str, ModuleType]) -> Tuple[ModuleType, """Get all the custom tasks available from the given custom tasks file or module. 
Args: - custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module it-self + custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module itself """ custom_tasks_module = create_custom_tasks_module(custom_tasks=custom_tasks) tasks_string = "" diff --git a/src/lighteval/tasks/requests.py b/src/lighteval/tasks/requests.py index 283e6959f..6dd307868 100644 --- a/src/lighteval/tasks/requests.py +++ b/src/lighteval/tasks/requests.py @@ -143,7 +143,7 @@ class TaskExampleId(NamedTuple): Represents the identifier for an example in a task. Attributes: - task_name (str): The name of the task. + task_name (str): The name of the task in `name|num_fewshot` format. doc_id_seed (str): The document id with the seed used for few_shot appended at the end. """ @@ -187,9 +187,7 @@ def get_golds(self, few_shot: bool = False): choices = self.choices golds = [] for gold_ix in gold_indices: - local_golds = as_list(choices[gold_ix]) - for local_gold in local_golds: - golds.append(local_gold) + golds.extend(as_list(choices[gold_ix])) return golds def __repr__(self):
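
Note on the pattern above: the first patch's key change is treating `openai` as an optional dependency — the package is detected with `importlib.util.find_spec` and the client is only constructed the first time the judge is actually called, so importing the metrics module never fails on machines without `openai` installed, and the bundled `judge_prompts.jsonl` is resolved relative to the module file instead of a repository-relative path. The following is a minimal, self-contained sketch of that pattern, not lighteval's exact code: `LazyOpenAIJudge` and its `evaluate` method are hypothetical stand-ins for `JudgeOpenAI`, while `is_openai_available`, `NO_OPENAI_ERROR_MSG`, the `OPENAI_API_KEY` environment variable, and the module-relative template path mirror what the diffs add.

# sketch.py — illustration of lazy optional-dependency loading (assumed names, see note above)
import importlib.util
import os


def is_openai_available() -> bool:
    # Mirrors lighteval.utils.is_openai_available: detect the package without importing it.
    return importlib.util.find_spec("openai") is not None


NO_OPENAI_ERROR_MSG = (
    "You are trying to use an Open AI LLM as a judge, for which you need `openai`, "
    "which is not available in your environment. Please install it using pip."
)

# Templates ship next to the module, so the path is built from __file__
# (as metrics.py does for judge_prompts.jsonl) rather than hard-coded.
TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), "judge_prompts.jsonl")


class LazyOpenAIJudge:
    """Hypothetical judge that defers OpenAI client creation until first use."""

    def __init__(self, model: str, api_key: str | None):
        self.model = model
        self.api_key = api_key
        self.client = None  # loaded lazily, as in the patched JudgeOpenAI.__init__

    def evaluate(self, prompt: str) -> str:
        if self.client is None:
            if not is_openai_available():
                raise ImportError(NO_OPENAI_ERROR_MSG)
            from openai import OpenAI  # only reached when the package is installed

            self.client = OpenAI(api_key=self.api_key)
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
        )
        return response.choices[0].message.content


# Usage sketch (requires `pip install openai` and a real OPENAI_API_KEY):
# judge = LazyOpenAIJudge("gpt-4o", os.getenv("OPENAI_API_KEY"))
# print(judge.evaluate("Rate this answer on a scale of 1 to 10: ..."))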