Merge branch 'main' into function_prompts
clefourrier authored Jul 5, 2024
2 parents cde6c04 + 843a0f8 commit c656d64
Showing 17 changed files with 82 additions and 55 deletions.
11 changes: 8 additions & 3 deletions run_evals_accelerate.py
@@ -51,7 +51,12 @@ def get_parser():
parser.add_argument(
"--public_run", default=False, action="store_true", help="Push results and details to a public repo"
)
parser.add_argument("--cache_dir", type=str, default=CACHE_DIR)
parser.add_argument(
"--cache_dir",
type=str,
default=CACHE_DIR,
help="Cache directory for downloaded datasets & model, defaults to `HF_HOME` environment variable",
)
parser.add_argument(
"--results_org",
type=str,
@@ -65,13 +70,13 @@ def get_parser():
"--custom_tasks",
type=str,
default=None,
help="Path to a file with custom tasks (a TASK list of dict and potentially prompt formating functions)",
help="Path to a file with custom tasks (a TASK list of dict and potentially prompt formatting functions)",
)
group.add_argument(
"--tasks",
type=str,
default=None,
help="Id of a task, e.g. 'original|mmlu:abstract_algebra|5|0' or path to a texte file with a list of tasks",
help="Comma-separated ids of tasks, e.g. 'original|mmlu:abstract_algebra|5' or path to a text file with a list of tasks",
)
parser.add_argument("--num_fewshot_seeds", type=int, default=1, help="Number of trials the few shots")
return parser
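To make the cleaned-up --tasks / --custom_tasks help text above concrete, here is a minimal, self-contained argparse sketch; it is not the project's actual get_parser(), and the task ids, defaults and help strings are only illustrative:

import argparse

# Minimal sketch mirroring the flags touched above; not lighteval's actual parser.
parser = argparse.ArgumentParser()
parser.add_argument("--cache_dir", type=str, default=None,
                    help="Cache directory for downloaded datasets & models")
group = parser.add_mutually_exclusive_group()
group.add_argument("--custom_tasks", type=str, default=None,
                   help="Path to a file with custom tasks and prompt formatting functions")
group.add_argument("--tasks", type=str, default=None,
                   help="Comma-separated task ids, or path to a text file with one task per line")

# Either a comma-separated list of task ids ...
args = parser.parse_args(["--tasks", "original|mmlu:abstract_algebra|5,original|arc:challenge|25"])
print(args.tasks.split(","))
# ... or a path to a text file listing one task id per line.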
2 changes: 1 addition & 1 deletion src/lighteval/evaluator.py
@@ -67,7 +67,7 @@ def evaluate( # noqa: C901
# A request output tupe is a Tuple where the first element is the index of
# the request for one document of one task i.e.
# task: "arc_easy", doc: "0"# request: "0" -> request_index = 0,
# We can have multiple request per doc for multi choice tasks for example.
# We can have multiple requests per doc for multi choice tasks for example.

# all responses for each (task, doc)
RequestIndexModelResponseTuple = collections.namedtuple(
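To illustrate the comment fixed above (several requests can belong to one document, for example one per answer choice), here is a hypothetical sketch; the field names are assumptions based on the truncated hunk, not necessarily lighteval's:

import collections

# Hypothetical grouping of model responses per (task, doc): a multi-choice doc
# yields one request per choice, all sharing the same doc id.
RequestIndexModelResponseTuple = collections.namedtuple(
    "RequestIndexModelResponseTuple", ["request_index", "model_response"]
)

responses_by_doc = {
    ("arc_easy", "0"): [
        RequestIndexModelResponseTuple(0, "logprob of choice A"),
        RequestIndexModelResponseTuple(1, "logprob of choice B"),
    ]
}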
2 changes: 1 addition & 1 deletion src/lighteval/logging/evaluation_tracker.py
@@ -513,7 +513,7 @@ def push_results_to_tensorboard( # noqa: C901
self, results: dict[str, dict[str, float]], details: dict[str, DetailsLogger.CompiledDetail]
):
if not is_nanotron_available():
hlog_warn("You cannot push results to tensorboard with having nanotron installed. Skipping")
hlog_warn("You cannot push results to tensorboard without having nanotron installed. Skipping")
return
config: Config = self.general_config_logger.config
lighteval_config = config.lighteval
2 changes: 1 addition & 1 deletion src/lighteval/metrics/imports/bert_scorer.py
@@ -163,7 +163,7 @@ def greedy_cos_idf(
- :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for
reference sentences.
- :param: `ref_idf` (torch.Tensor): BxK, idf score of each word
piece in the reference setence
piece in the reference sentence
- :param: `hyp_embedding` (torch.Tensor):
embeddings of candidate sentences, BxKxd,
B: batch size, K: longest length, d: bert dimenison
@@ -4,5 +4,5 @@
{"name": "pair-math-v1-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. Your evaluation should consider correctness and helpfulness. You will be given reference answers, the assistant A's answers, the assistant B's answers. Your job is to determine which assistant provides correct and helpful answers to the second user question. Begin your evaluation by comparing both assistants' answers with the reference answers. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"}
{"name": "single-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
{"name": "single-math-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. You evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. You evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Your evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Your evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
14 changes: 11 additions & 3 deletions src/lighteval/metrics/llm_as_judge.py
@@ -27,9 +27,8 @@
import time
from typing import Optional

from openai import OpenAI

from lighteval.logging.hierarchical_logger import hlog_warn
from lighteval.utils import NO_OPENAI_ERROR_MSG, is_openai_available


class JudgeOpenAI:
@@ -70,7 +69,8 @@ def __init__(
openai_api_key: str,
multi_turn: bool = False,
):
self.client = OpenAI(api_key=openai_api_key)
self.client = None # loaded lazily
self.openai_api_key = openai_api_key
self.model = model
self.seed = seed
self.temperature = temperature
@@ -112,6 +112,14 @@ def evaluate_answer(
Raises:
Exception: If an error occurs during the API call.
"""
if self.client is None:
if not is_openai_available():
raise ImportError(NO_OPENAI_ERROR_MSG)

from openai import OpenAI

self.client = OpenAI(api_key=self.openai_api_key)

prompts = [
self.__get_prompts_single_turn(
questions[0], answers[0], references[0] if references is not None and len(references) > 0 else None
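The hunk above replaces the eager OpenAI(...) construction with a lazily created client, so importing lighteval no longer requires the openai package unless the judge is actually used. A rough sketch of the same pattern, with a made-up class name:

class LazyJudge:
    # Illustrative only: mirrors the lazy-client pattern added to JudgeOpenAI above.
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.client = None  # nothing imported or constructed at init time

    def _ensure_client(self):
        if self.client is None:
            try:
                from openai import OpenAI  # imported only on first use
            except ImportError as err:
                raise ImportError("The `openai` package is required for LLM-as-judge metrics") from err
            self.client = OpenAI(api_key=self.api_key)
        return self.client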
10 changes: 6 additions & 4 deletions src/lighteval/metrics/metrics.py
@@ -20,6 +20,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os

import numpy as np
from aenum import Enum

@@ -225,29 +227,29 @@ class Metrics(Enum):
corpus_level_fn=np.mean,
higher_is_better=True,
)
llm_judge_multi_turn = SampleLevelMetricGrouping(
llm_judge_multi_turn_openai = SampleLevelMetricGrouping(
metric=["single_turn", "multi_turn"],
higher_is_better=True,
category=MetricCategory.LLM_AS_JUDGE_MULTI_TURN,
use_case=MetricUseCase.SUMMARIZATION,
sample_level_fn=JudgeLLM(
judge_model_name="gpt-3.5-turbo",
template_path="src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl",
template_path=os.path.join(os.path.dirname(__file__), "judge_prompts.jsonl"),
multi_turn=True,
).compute,
corpus_level_fn={
"single_turn": np.mean,
"multi_turn": np.mean,
},
)
llm_judge = SampleLevelMetricGrouping(
llm_judge_openai = SampleLevelMetricGrouping(
metric=["judge_score"],
higher_is_better=True,
category=MetricCategory.LLM_AS_JUDGE,
use_case=MetricUseCase.SUMMARIZATION,
sample_level_fn=JudgeLLM(
judge_model_name="gpt-3.5-turbo",
template_path="src/lighteval/tasks/extended/mt_bench/judge_prompts.jsonl",
template_path=os.path.join(os.path.dirname(__file__), "judge_prompts.jsonl"),
multi_turn=False,
).compute,
corpus_level_fn={
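The template_path change above resolves the prompts file next to the module instead of relative to the current working directory, so the metric keeps working when lighteval is installed as a package rather than run from a source checkout. A small standalone sketch of the idea (only the filename is taken from the diff):

import os

# Resolve a data file relative to this module, not to wherever Python was launched.
HERE = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(HERE, "judge_prompts.jsonl")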
24 changes: 10 additions & 14 deletions src/lighteval/metrics/metrics_sample.py
@@ -622,7 +622,7 @@ def edit_similarity(self, s1, s2):


class JudgeLLM:
available_models = ["gpt-3.5-turbo"]
available_models = ["gpt-3.5-turbo", "gpt-4o", "gpt-4-turbo", "gpt-4"]

def __init__(self, judge_model_name: str, template_path: str, multi_turn: bool = False):
if judge_model_name not in self.available_models:
@@ -631,24 +631,20 @@ def __init__(self, judge_model_name: str, template_path: str, multi_turn: bool =
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
self.multi_turn = multi_turn

try:
self.judge = JudgeOpenAI(
model=judge_model_name,
seed=42,
temperature=0.0,
templates_path=template_path,
openai_api_key=OPENAI_API_KEY,
multi_turn=multi_turn,
)
except Exception as e:
print(f"Could not initialize the JudgeOpenAI model:\n{e}")
self.judge = None
self.judge = JudgeOpenAI(
model=judge_model_name,
seed=42,
temperature=0.0,
templates_path=template_path,
openai_api_key=OPENAI_API_KEY,
multi_turn=multi_turn,
)

def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]:
"""
Compute the score of a generative task using a llm as a judge.
The generative task can be multiturn with 2 turns max, in that case, we
return scores for turn 1 and 2. Also returns user_prompt and judgment
return scores for turn 1 and 2. Also returns user_prompt and judgement
which are ignored later by the aggregator.
"""

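Dropping the try/except around JudgeOpenAI(...) above makes initialization fail fast: a misconfigured judge now raises immediately instead of silently leaving self.judge as None and failing much later. A toy sketch of the two behaviours, with made-up helper names:

def build_judge_fail_fast(api_key):
    # New behaviour: problems surface at construction time.
    if api_key is None:
        raise ValueError("OPENAI_API_KEY is not set")
    return object()  # stand-in for the real judge client

def build_judge_swallowed(api_key):
    # Old behaviour: the error is only printed and the judge becomes None.
    try:
        return build_judge_fail_fast(api_key)
    except Exception as err:
        print(f"Could not initialize the judge:\n{err}")
        return None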
8 changes: 4 additions & 4 deletions src/lighteval/models/base_model.py
@@ -79,7 +79,7 @@ def __init__(
self._add_special_tokens = config.add_special_tokens if config.add_special_tokens is not None else False
self._tokenizer = self._create_auto_tokenizer(config, env_config)

# If model_parallel is not set we compare the number of process with the number of GPUs
# If model_parallel is not set we compare the number of processes with the number of GPUs
self.model = self._create_auto_model(config, env_config)
self.model.eval()
torch.set_grad_enabled(False)
@@ -819,7 +819,7 @@ def _loglikelihood_tokens(
)
res.append(answer)

# Clean up GPUS
# Clean up GPUs
del model_output
del logits
del batched_inputs
@@ -852,7 +852,7 @@ def prepare_batch_logprob(
hlog_warn("max_context is None, using max_length")
max_context = self.max_length

# Each sample is concatenated and cut to lenght or padded to max_length
# Each sample is concatenated and cut to length or padded to max_length
for orig_tokens in inputs:
truncated.append(max(len(orig_tokens) - max_context, 0))

@@ -1030,7 +1030,7 @@ def _loglikelihood_single_token(
)
res.append(answer)

# Clean up GPUS
# Clean up GPUs
del out
del batch_probs
del batched_inputs
11 changes: 4 additions & 7 deletions src/lighteval/models/model_config.py
@@ -85,9 +85,9 @@ class BaseModelConfig:
If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and
`False` for causal models.
model_parallel (bool, optional, defaults to False):
True/False: force to uses or not the `accelerate` library to load a large
True/False: force to use or not the `accelerate` library to load a large
model across multiple devices.
Default: None which correspond to comparing the number of process with
Default: None which corresponds to comparing the number of processes with
the number of GPUs. If it's smaller => model-parallelism, else not.
dtype (Union[str, torch.dtype], optional, defaults to None):):
Converts the model weights to `dtype`, if specified. Strings get
@@ -277,11 +277,8 @@ def create_model_config(args: Namespace, accelerator: Union["Accelerator", None]

return BaseModelConfig(**args_dict)

if hasattr(args, "model_config") and args.model_config:
config = args.model_config["model"]
else:
with open(args.model_config_path, "r") as f:
config = yaml.safe_load(f)["model"]
with open(args.model_config_path, "r") as f:
config = yaml.safe_load(f)["model"]

if config["type"] == "tgi":
return TGIModelConfig(
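After this change, create_model_config() always reads the model section from the YAML file passed via --model_config_path (the in-memory model_config shortcut is gone). A minimal sketch of the expected shape; apart from the top-level model key and its type field, the keys below are assumptions for illustration:

import yaml

# Inline YAML stands in for the file given to --model_config_path.
raw = """
model:
  type: tgi            # dispatched on below, as in the kept code path
  instance:            # illustrative sub-keys, not necessarily lighteval's schema
    address: http://localhost:8080
"""
config = yaml.safe_load(raw)["model"]
if config["type"] == "tgi":
    print("Would build a TGIModelConfig for", config["instance"]["address"])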
4 changes: 2 additions & 2 deletions src/lighteval/models/model_loader.py
@@ -57,8 +57,8 @@ def load_model( # noqa: C901
config: Union[BaseModelConfig, AdapterModelConfig, DeltaModelConfig, TGIModelConfig, InferenceEndpointModelConfig],
env_config: EnvConfig,
) -> Tuple[Union[BaseModel, AdapterModel, DeltaModel, ModelClient], ModelInfo]:
"""Will load either a model from an inference server or a model from a checkpoint. depending
on the arguments passed to the program.
"""Will load either a model from an inference server or a model from a checkpoint, depending
on the config type.
Args:
args (Namespace): arguments passed to the program
4 changes: 2 additions & 2 deletions src/lighteval/models/nanotron_model.py
@@ -846,7 +846,7 @@ def _loglikelihood_single_token(

tq.desc = f"loglikelihood_single_token Subset {s} Node {dist.get_rank(self.parallel_context.world_pg)} - {human_format(tokens_per_sec)} tokens/s"

# Clean up GPUS
# Clean up GPUs
del out
del batch_probs
del batched_inputs
@@ -1083,7 +1083,7 @@ def _loglikelihood_tokens(
tokens_per_sec = batched_inputs.numel() / (elapsed_time_per_iteration_ms / 1000)
tq.desc = f"loglikelihood Subset {s} Node {dist.get_rank(self.parallel_context.world_pg)} - {human_format(tokens_per_sec)} tokens/s"

# Clean up GPUS
# Clean up GPUs
del out
del logits
del batched_inputs