From 7c9ccaa9913c859f620ae536b229ebd96b182e6b Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 13:54:57 +0530 Subject: [PATCH 01/15] init(eval): add baseline DiscoveryBench infer script Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 63 ++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 evaluation/discoverybench/run_infer.py diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py new file mode 100644 index 000000000000..8b3c98e0379f --- /dev/null +++ b/evaluation/discoverybench/run_infer.py @@ -0,0 +1,63 @@ +import os + +import git + +from evaluation.utils.shared import ( + make_metadata, + prepare_dataset, + run_evaluation, +) +from openhands.core.config import ( + get_llm_config_arg, + parse_arguments, +) + + +def process_instance(instance, metadata, output_file): ... + + +def create_dataset(repo_location): ... + + +if __name__ == '__main__': + args = parse_arguments() + + # clone git repositor for csv files + repo_url = 'https://github.com/allenai/discoverybench.git' + repo_location = 'git-discoverybench-allenai' + + try: + git.Repo.clone_from(repo_url, repo_location) + except git.exc.GitCommandError: + print('Repository already exists') + + dataset = create_dataset(repo_location) + + # check if there is any empty csv_file + if dataset['data_files'].isnull().any(): + raise ValueError('Some csv files are missing.') + + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') + + metadata = make_metadata( + llm_config, + 'discoverybench-python', + args.agent_cls, + args.max_iterations, + args.eval_note, + args.eval_output_dir, + ) + output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') + instances = prepare_dataset(dataset, output_file, args.eval_n_limit) + + run_evaluation( + instances, + metadata, + output_file, + args.eval_num_workers, + process_instance, + ) From da61d2042c56e40701d26d06954c48801b4d92cd Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 14:03:58 +0530 Subject: [PATCH 02/15] feat(eval): implement create_dataset function to clone and prepare dataset Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 114 ++++++++++++++++++++++++- 1 file changed, 113 insertions(+), 1 deletion(-) diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index 8b3c98e0379f..495c53483208 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -1,6 +1,8 @@ +import json import os import git +import pandas as pd from evaluation.utils.shared import ( make_metadata, @@ -12,11 +14,121 @@ parse_arguments, ) +DATA_FILES = {} + def process_instance(instance, metadata, output_file): ... -def create_dataset(repo_location): ... 
+def update_csv_name(name): + name = name.replace('-', '_') + + if 'meta_regression' in name: + name = name.replace('meta_regression', 'meta-regression') + if 'ML_enabled' in name: + name = name.replace('ML_enabled', 'ML-enabled') + + return name + + +def list_csv_files(list_of_datasets): + res = [] + for ele in list_of_datasets: + for key, value in ele.items(): + if key == 'name': + csv_file_name = update_csv_name(value) + res.append(DATA_FILES[csv_file_name]) + return res + + +def create_dataset(repo_location: str, split: str = 'test'): + """ + Create a dataset from the discoverybench repository + by walking through the repository and extracting metadata + from the metadata_{}.json files + + Args: + repo_location: Location of the repository + split: Split of the dataset to use + + Returns: + df: DataFrame containing the dataset instances + """ + + data_dict = {} + + data_location = os.path.join(repo_location, 'discoverybench', 'real', split) + answer_key_location = os.path.join(repo_location, 'eval', 'answer_key_real.csv') + + idx = 0 + + for root, dirs, files in os.walk(data_location): + for file in files: + if file.endswith('.json'): + if 'metadata' in file: + metadata = json.load(open(os.path.join(root, file))) + + dataset = root.split('/')[-1] + metadata_id = file.split('_')[-1].split('.')[0] + domain = metadata.get('domain', '') + domain_knowledge = metadata.get('domain_knowledge', '') + workflow_tags = metadata.get('workflow_tags', '') + datasets = metadata.get('datasets', []) + queries = metadata.get('queries', []) + gold_workflow = metadata.get('workflow') + + # loop through queries list to get queries + # and each query has qid; add that to dictionary + for query in queries[0]: + qid = query.get('qid', '') + + data = { + 'dataset': dataset, + 'metadata_id': metadata_id, + 'qid': qid, + 'domain': domain, + 'domain_knowledge': domain_knowledge, + 'workflow_tags': workflow_tags, + 'datasets': datasets, + 'question_type': query['question_type'], + 'query': query['question'], + 'gold_workflow': gold_workflow, + 'dataset_metadata': metadata, + } + + data_dict[idx] = data + idx += 1 + + if file.endswith('.csv'): + DATA_FILES[file] = os.path.join(root, file) + if file.endswith('.txt'): + DATA_FILES[file] = os.path.join(root, file) + + df = pd.DataFrame.from_dict(data_dict, orient='index') + + df['instance_id'] = df.index + + df['data_files'] = df['datasets'].apply(lambda x: list_csv_files(x)) + + answer_key = pd.read_csv(answer_key_location) + + answer_key = answer_key.rename( + columns={ + 'metadataid': 'metadata_id', + 'query_id': 'qid', + 'gold_hypothesis': 'gold_hypothesis', + } + ) + + df['qid'] = df['qid'].astype(int) + df['metadata_id'] = df['metadata_id'].astype(int) + + answer_key['qid'] = answer_key['qid'].astype(int) + answer_key['metadata_id'] = answer_key['metadata_id'].astype(int) + + df = pd.merge(df, answer_key, on=['dataset', 'metadata_id', 'qid'], how='left') + + return df if __name__ == '__main__': From cfe39b6d4d679687802f73b0db82bbd41a240bca Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 14:25:22 +0530 Subject: [PATCH 03/15] feat(eval): implement process_instance function Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 198 ++++++++++++++++++++++++- 1 file changed, 197 insertions(+), 1 deletion(-) diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index 495c53483208..eddf92fb4e3b 100644 --- a/evaluation/discoverybench/run_infer.py +++ 
b/evaluation/discoverybench/run_infer.py @@ -1,3 +1,4 @@ +import asyncio import json import os @@ -5,19 +6,214 @@ import pandas as pd from evaluation.utils.shared import ( + EvalMetadata, + EvalOutput, + codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) +from openhands.controller.state.state import State from openhands.core.config import ( + AppConfig, + SandboxConfig, get_llm_config_arg, parse_arguments, ) +from openhands.core.logger import openhands_logger as logger +from openhands.core.main import create_runtime, run_controller +from openhands.events.action import MessageAction +from openhands.utils.async_utils import call_async_from_sync DATA_FILES = {} +AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { + 'CodeActAgent': codeact_user_response, +} + +AGENT_CLS_TO_INST_SUFFIX = { + 'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: exit .\n' +} + + +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_openhands=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + base_container_image='python:3.12-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +def get_dv_query_for_real( + datasets, question, domain_knowledge=None, workflow_tags=None +): + """ + Prepare a structured query for the agent to execute on the specified datasets. + + This function constructs a query by compiling metadata from the provided datasets, along with any relevant domain knowledge and workflow tags. + + Args: + datasets: List of datasets + question: Query to be answered + domain_knowledge: Domain knowledge if any + workflow_tags: Workflow tags if any + + Returns: + query_to_dv: Query to be run on the dataset + dataset_meta: Metadata of the dataset + """ + + dataset_meta = '' + for dataset_metadata in datasets: + dataset_meta += 'Dataset name: ' + dataset_metadata['name'] + dataset_meta += 'Dataset description: ' + dataset_metadata['description'] + dataset_meta += '\nBrief description of columns: ' + for col in dataset_metadata['columns']['raw']: + dataset_meta += col['name'] + ': ' + col['description'] + ', ' + + query_to_dv = dataset_meta + + query_to_dv += f'\nQuery: {question}' + + if domain_knowledge: + query_to_dv += ( + '\nAdditionally, we provide some hints that might be useful to solve the task. Domain Knowledge: \n' + + domain_knowledge + + '.\n' + ) + + if workflow_tags: + query_to_dv += 'The meta tags are: ' + workflow_tags + '.\n' + + query_to_dv += ( + 'In the final answer, please write down a scientific hypothesis in ' + 'natural language, derived from the provided dataset, clearly stating the ' + 'context of hypothesis (if any), variables chosen (if any) and ' + 'relationship between those variables (if any) including any statistical significance.' + 'Also generate a summary of the full workflow starting from data loading that led to the final answer as WORKFLOW SUMMARY:' + ) + + # Run the NL query through datavoyager + return query_to_dv, dataset_meta + + +def initialize_runtime(runtime, data_files): ... + + +def complete_runtime(state: State): ... 
+ + +def process_instance( + instance: pd.Series, + metadata: EvalMetadata, + reset_logger: bool = True, +): + """ + Process and evaluate a single instance of the dataset. + + This function executes the OpenHands agent + for a specific instance of the dataset. It retrieves + the agent's results and evaluates them against the gold + hypothesis. + + Args: + instance: A single row of the dataset + metadata: Metadata for the evaluation + reset_logger: Whether to reset the logger + + Returns: + output: EvalOutput object + """ + + config = get_config(metadata) + + # use a session id for concurrent evaluation + sid = 'ID_' + str(instance.instance_id) + + # Setup the logger properly, so you can run + # multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance.instance_id}.') + + problem_statement, dataset_metadata = get_dv_query_for_real( + datasets=instance.datasets, + question=instance.query, + domain_knowledge=instance.domain_knowledge, + workflow_tags=instance.workflow_tags, + ) + + # Prepare instruction + instruction = ( + f'You are a discovery agent who can execute a python code only once to answer a query based on one or more datasets. The datasets will be present in the current directory.\n\n' + 'Environment has been set up for you to start working. You may assume all necessary tools and datasets are installed.\n\n' + '# Problem Statement\n' + f'{problem_statement}\n\n' + ) + instruction += ( + 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' + 'You should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\n' + 'You SHOULD INCLUDE PROPER INDENTATION in your edit commands.\n' + ) + # NOTE: You can actually set slightly different instruction for different agents + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + + # Here's how you can run the agent (similar to the `main` function) and get the final task state + runtime = create_runtime(config, sid=sid) + call_async_from_sync(runtime.connect) + initialize_runtime(runtime, instance.data_files) + + state: State | None = asyncio.run( + run_controller( + config=config, + initial_user_action=MessageAction(content=instruction), + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), + ) + ) + + if state is None: + raise ValueError('State should not be None.') + + metrics = state.metrics.get() if state.metrics else None + test_result = complete_runtime(state) + + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + # TODO: add discoverybench evaluation + + output = EvalOutput( + instance_id=str(instance.instance_id), + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result=test_result, + ) -def process_instance(instance, metadata, output_file): ... 
+ return output def update_csv_name(name): From 33071aabfa0e4e4526f28dc2ee0d407c905d237f Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 14:29:15 +0530 Subject: [PATCH 04/15] feat(eval): initialize docker runtime with necessary python libraries Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 47 ++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index eddf92fb4e3b..2f0029292113 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -23,11 +23,23 @@ ) from openhands.core.logger import openhands_logger as logger from openhands.core.main import create_runtime, run_controller -from openhands.events.action import MessageAction +from openhands.events.action import CmdRunAction, MessageAction +from openhands.events.observation import CmdOutputObservation +from openhands.runtime.base import Runtime from openhands.utils.async_utils import call_async_from_sync DATA_FILES = {} +LIBRARIES = [ + 'pandas', + 'numpy', + 'scipy', + 'matplotlib', + 'seaborn', + 'scikit-learn', + 'statsmodels', +] + AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { 'CodeActAgent': codeact_user_response, } @@ -111,7 +123,38 @@ def get_dv_query_for_real( return query_to_dv, dataset_meta -def initialize_runtime(runtime, data_files): ... +def initialize_runtime(runtime: Runtime, data_files: list[str]): + """ + Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + assert obs.exit_code == 0 + + for file in data_files: + runtime.copy_to( + file, + '/workspace', + ) + + for lib in LIBRARIES: + action = CmdRunAction(command=f'pip install {lib}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = runtime.run_action(action) + assert obs.exit_code == 0 + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") def complete_runtime(state: State): ... 
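A side note on the dependency installation in `initialize_runtime` above: the loop issues one `pip install` call per library. Below is a minimal alternative sketch, reusing the `LIBRARIES`, `CmdRunAction`, `runtime`, and `logger` names from `run_infer.py`, and assuming the sandbox shell accepts a single combined install command.

```python
# Sketch only: install every evaluation dependency in one pip invocation
# instead of looping over LIBRARIES one package at a time.
action = CmdRunAction(command='pip install ' + ' '.join(LIBRARIES))
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
assert obs.exit_code == 0
```

Both forms install the same packages; the combined call simply reduces round-trips to the runtime.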
From a585cee1e2a6cbde38f8bc11383597a7e95ea860 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 14:32:39 +0530 Subject: [PATCH 05/15] feat(eval): implement complete_runtime function Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 57 +++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 2 deletions(-) diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index 2f0029292113..ae1d5542baea 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -23,7 +23,7 @@ ) from openhands.core.logger import openhands_logger as logger from openhands.core.main import create_runtime, run_controller -from openhands.events.action import CmdRunAction, MessageAction +from openhands.events.action import AgentFinishAction, CmdRunAction, MessageAction from openhands.events.observation import CmdOutputObservation from openhands.runtime.base import Runtime from openhands.utils.async_utils import call_async_from_sync @@ -157,7 +157,60 @@ def initialize_runtime(runtime: Runtime, data_files: list[str]): logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") -def complete_runtime(state: State): ... +def extract_gen_hypo_from_logs(content: str): ... + + +def get_last_agent_finish_action(state: State) -> AgentFinishAction: + for event in state.history.get_events(reverse=True): + if isinstance(event, AgentFinishAction): + return event + return None + + +def get_last_message_action(state: State) -> MessageAction: + for event in state.history.get_events(reverse=True): + if isinstance(event, MessageAction): + return event + return None + + +def complete_runtime(state: State): + last_agent_finish_action = get_last_agent_finish_action(state) + last_agent_message_action = get_last_message_action(state) + + if last_agent_finish_action is not None: + final_message_1 = last_agent_finish_action.thought + gen_hypo_1, gen_workflow_1, error_1 = extract_gen_hypo_from_logs( + final_message_1 + ) + else: + gen_hypo_1, gen_workflow_1, error_1 = '', '', '' + + if last_agent_message_action is not None: + final_message_2 = last_agent_message_action.content + gen_hypo_2, gen_workflow_2, error_2 = extract_gen_hypo_from_logs( + final_message_2 + ) + else: + gen_hypo_2, gen_workflow_2, error_2 = '', '', '' + + if gen_hypo_1 and gen_hypo_2: + test_result = { + 'gen_hypo': last_agent_finish_action.thought + if last_agent_finish_action + else last_agent_message_action.content, + 'gen_workflow': '', + 'error': '', + } + return test_result + + test_result = { + 'gen_hypo': gen_hypo_1 if gen_hypo_1 else gen_hypo_2, + 'gen_workflow': gen_workflow_1 if gen_workflow_1 else gen_workflow_2, + 'error': error_1 if error_1 else error_2, + } + + return test_result def process_instance( From 98efba480d283d9a04e224d180af99e2544f75a1 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 14:36:53 +0530 Subject: [PATCH 06/15] feat(eval): add response parser for DiscoveryBench evaluation Signed-off-by: Abhijeetsingh Meena --- .../eval_utils/response_parser.py | 52 +++++++++++++++++++ evaluation/discoverybench/run_infer.py | 6 +-- 2 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 evaluation/discoverybench/eval_utils/response_parser.py diff --git a/evaluation/discoverybench/eval_utils/response_parser.py b/evaluation/discoverybench/eval_utils/response_parser.py new file mode 100644 index 000000000000..b5de82b5df9e --- /dev/null +++ 
b/evaluation/discoverybench/eval_utils/response_parser.py @@ -0,0 +1,52 @@ +workflow_summary_markers = [ + 'WORKFLOW SUMMARY', + 'WORKFLOW_SUMMARY', + 'WORKFLOW-SUMMARY', + 'Workflow Summary', +] + +final_answer_markers = [ + 'FINAL ANSWER', + 'FINAL_ANSWER', + 'FINAL-ANSWER', + 'Final Answer', + 'Scientific Hypothesis', + 'Hypothesis', +] + +next_agent_markers = [ + 'NEXT AGENT', + 'NEXT-AGENT', + 'NEXT_AGENT', + 'FEEDBACK', +] + + +def extract_between(content, start_markers, end_markers=None): + for marker in start_markers: + if marker in content: + result = content.split(marker, 1)[1] + if end_markers: + for end_marker in end_markers: + if end_marker in result: + result = result.split(end_marker, 1)[0] + return result + return '' + + +def extract_gen_hypo_from_logs(content: str): + error = '' + + gen_workflow = extract_between( + content, workflow_summary_markers, final_answer_markers + ) + + if not gen_workflow: + error += 'No Workflow Summary found in the line. | ' + + gen_hypothesis = extract_between(content, final_answer_markers, next_agent_markers) + + if not gen_hypothesis: + error += 'No Final Answer in the line.' + + return gen_hypothesis, gen_workflow, error diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index ae1d5542baea..0566b56d20e0 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -5,6 +5,9 @@ import git import pandas as pd +from evaluation.discoverybench.eval_utils.response_parser import ( + extract_gen_hypo_from_logs, +) from evaluation.utils.shared import ( EvalMetadata, EvalOutput, @@ -157,9 +160,6 @@ def initialize_runtime(runtime: Runtime, data_files: list[str]): logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") -def extract_gen_hypo_from_logs(content: str): ... - - def get_last_agent_finish_action(state: State) -> AgentFinishAction: for event in state.history.get_events(reverse=True): if isinstance(event, AgentFinishAction): From 8140aae2ce1e99f62572923ef0189c2892134fae Mon Sep 17 00:00:00 2001 From: Harshit Surana Date: Wed, 30 Oct 2024 14:47:04 +0530 Subject: [PATCH 07/15] docs(eval): Add README for discoverybench --- evaluation/discoverybench/README.md | 38 +++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 evaluation/discoverybench/README.md diff --git a/evaluation/discoverybench/README.md b/evaluation/discoverybench/README.md new file mode 100644 index 000000000000..9b5d5df495c7 --- /dev/null +++ b/evaluation/discoverybench/README.md @@ -0,0 +1,38 @@ +# DiscoveryBench with OpenHands + +[DiscoveryBench](https://github.com/allenai/discoverybench/) [(Paper)](https://arxiv.org/abs/2407.01725v1) contains 264 tasks collected across 6 diverse domains, such as biology, economics, and sociology. It incorporates discovery workflows from published papers to approximate the real-world challenges faced by researchers. + +

+
+<em>Figure: DiscoveryBench Background</em>
+
+

+
+
+
+## Setup Environment and LLM Configuration
+
+1. Please follow the instructions mentioned [here](https://github.com/openlocus/OpenHands/blob/discoverybench-openhands-integration/evaluation/README.md#setup) to set up the OpenHands development environment and LLMs locally
+
+2. Execute the bash script to start DiscoveryBench Evaluation
+
+```
+./evaluation/discoverybench/scripts/run_infer.sh [YOUR MODEL CONFIG]
+```
+Replace `[YOUR MODEL CONFIG]` with any model config that you have set up in `config.toml`
+
+
+## Run Inference on DiscoveryBench Instances
+
+When the `run_infer.sh` script is started, it will automatically pull the latest DiscoveryBench instances & set up the agent environment. The OpenHands agent is invoked to process the task within this environment, producing a hypothesis. We then evaluate it against the “gold” hypothesis provided by DiscoveryBench. The evaluation result, along with the agent chat history, is logged to `output.jsonl` under `evaluation_outputs`.
+
+
+```
+./evaluation/discoverybench/scripts/run_infer.sh [MODEL_CONFIG] [GIT_COMMIT] [AGENT] [EVAL_LIMIT] [NUM_WORKERS]
+```
+
+- `MODEL_CONFIG`: Name of the model you want to evaluate with
+- `GIT_COMMIT`: The git commit hash or release tag for OpenHands, e.g., `HEAD` or a specific tag like `0.6.2`.
+- `AGENT`: Use `CodeActAgent`; it is currently the only supported agent.
+- `EVAL_LIMIT`: Number of samples to evaluate.
+- `NUM_WORKERS`: Number of workers to parallelize the evaluation process.
+

From 477eb849b7c2a482447be13523edb799f728097f Mon Sep 17 00:00:00 2001
From: Harshit Surana
Date: Wed, 30 Oct 2024 14:55:44 +0530
Subject: [PATCH 08/15] docs(eval): Add README for DiscoveryBench eval utils

---
 evaluation/discoverybench/eval_utils/README.md | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 evaluation/discoverybench/eval_utils/README.md

diff --git a/evaluation/discoverybench/eval_utils/README.md b/evaluation/discoverybench/eval_utils/README.md
new file mode 100644
index 000000000000..13c98ebaa8d2
--- /dev/null
+++ b/evaluation/discoverybench/eval_utils/README.md
@@ -0,0 +1,7 @@
+## DiscoveryBench Evaluation Utils
+
+- **`eval_w_subhypo_gen.py`**: Implements the DiscoveryBench logic for evaluating agent-generated hypotheses (see the scoring sketch below).
+- **`lm_utils.py`**: Provides utility functions necessary for the evaluation process.
+- **`openai_helpers.py`**: Includes helper functions for OpenAI-related tasks.
+- **`openai_semantic_gen_prompts.py`**: Contains prompts used for semantic generation.
+- **`response_parser.py`**: Handles the parsing of agent-generated hypotheses (see the usage sketch below).
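For reference, here is a minimal usage sketch of the marker-based parsing implemented in `response_parser.py` above. The sample message is invented for illustration; only the import path and the function's behavior come from the patch.

```python
from evaluation.discoverybench.eval_utils.response_parser import (
    extract_gen_hypo_from_logs,
)

# Invented final agent message containing the two markers the parser scans for.
sample = (
    'WORKFLOW SUMMARY: Loaded the CSV and fit a linear regression of y on x.\n'
    'FINAL ANSWER: y increases linearly with x (p < 0.05).'
)

hypothesis, workflow, error = extract_gen_hypo_from_logs(sample)
# hypothesis -> ': y increases linearly with x (p < 0.05).'
# workflow   -> ': Loaded the CSV and fit a linear regression of y on x.\n'
# error      -> ''  (both markers were found, so no error text is accumulated)
```

Note that the extracted strings keep the leading `: ` after each marker; the parser does not strip it.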
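The first bullet above refers to the scoring logic added in the next patch (`eval_w_subhypo_gen.py`): each matched sub-hypothesis contributes `context * variable-F1 * relation` to an accuracy score, and the final score multiplies the mean accuracy (over all predicted sub-hypotheses) by context recall. A small numeric sketch with made-up scores:

```python
# Made-up example following the aggregation in run_eval_gold_vs_gen_NL_hypo_workflow.
matched_accuracy_scores = [1.0 * 0.8 * 0.5]  # one matched pair: context=1.0, variable F1=0.8, relation=0.5
num_gold_sub_hypotheses = 2                  # gold hypothesis was split into two sub-hypotheses
num_gen_sub_hypotheses = 1                   # the agent's hypothesis yielded a single sub-hypothesis
num_matched = 1                              # one gold sub-hypothesis had a context-matched prediction

recall_context = num_matched / num_gold_sub_hypotheses                 # 0.5
mean_accuracy = sum(matched_accuracy_scores) / num_gen_sub_hypotheses  # 0.4
final_score = recall_context * mean_accuracy                           # 0.2
```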
From f1bf06c8df2a00bab3c8902cda95ffd4ab6f2390 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 15:02:25 +0530 Subject: [PATCH 09/15] refactor(eval): integrate DiscoveryBench evaluation and update scripts for linting compliance Signed-off-by: Abhijeetsingh Meena --- .../discoverybench/eval_utils/__init__.py | 0 .../eval_utils/eval_w_subhypo_gen.py | 538 ++++++++++++++++++ .../discoverybench/eval_utils/lm_utils.py | 64 +++ .../eval_utils/openai_helpers.py | 190 +++++++ .../eval_utils/openai_semantic_gen_prompts.py | 151 +++++ evaluation/discoverybench/run_infer.py | 19 +- 6 files changed, 961 insertions(+), 1 deletion(-) create mode 100644 evaluation/discoverybench/eval_utils/__init__.py create mode 100644 evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py create mode 100644 evaluation/discoverybench/eval_utils/lm_utils.py create mode 100644 evaluation/discoverybench/eval_utils/openai_helpers.py create mode 100644 evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py diff --git a/evaluation/discoverybench/eval_utils/__init__.py b/evaluation/discoverybench/eval_utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py b/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py new file mode 100644 index 000000000000..a80df8279cfb --- /dev/null +++ b/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py @@ -0,0 +1,538 @@ +import json +import logging + +from openai import OpenAI + +from .lm_utils import run_chatgpt_query_multi_turn +from .openai_helpers import get_response + +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO, +) +logger = logging.getLogger(__name__) + + +def get_score_from_answer(type, answer): + if type == 'context': + answer = answer.replace('Answer:', '').strip() + if answer.startswith('A)'): + return 1.0 + elif answer.startswith('B)'): + return 0.0 + return -1.0 + + elif type == 'var': + try: + var_json = json.loads(answer) + # print(f"var_json:{var_json}") + p = 0.0 + r = 0.0 + f1 = 0.0 + if var_json['sizeB']: + p = var_json['intersection'] / var_json['sizeB'] + if var_json['sizeA']: + r = var_json['intersection'] / var_json['sizeA'] + if p > 0.0 and r > 0.0: + f1 = (2 * p * r) / (p + r) + else: + f1 = 0.0 + eval_rec = { + 'p': p, + 'r': r, + 'f1': f1, + 'sizeA': var_json['sizeA'], + 'sizeB': var_json['sizeB'], + 'intersection': var_json['intersection'], + 'explanation': var_json['explanation'], + } + print(f'var_eval: {eval_rec}') + return eval_rec + except Exception: # COMMENT: added Exception + return {'p': -1.0, 'r': -1.0, 'f1': -1.0} + elif type == 'rel': + print(answer) + rel_json = json.loads(answer) + answer_str = rel_json['answer'].strip() + if answer_str.startswith('A') or 'very similar' in answer_str: + return 1.0 + elif ( + answer_str.startswith('B') or 'similar but general than HypoA' in answer_str + ): + return 0.5 + elif answer_str.startswith('C') or 'different' in answer_str: + return 0.0 + return -1.0 + return -1.0 + + +def ask_dimension_question( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dimension, + dataset_type, + use_column_metadata=True, +): + dimension_question = '' + answer = '' + score = 0.0 + if dimension == 'var': + score = {'p': -1.0, 'r': -1.0, 'f1': -1.0} + num_tokens = 256 + num_retries = 1 + json_response = False + + messages = [ + { + 'role': 'system', + 
'content': 'You are an AI assistant that helps evaluate a data-driven hypothesis. You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + ] + if dimension == 'context': + dimension_question = """\ + Question: Is HypoB defined in the same context as HypoA? + (Context refers to assumptions/stratification under which the hypotheses are defined.) + Options: A) same B) different + What is your answer?""" + elif dimension == 'var': + dimension_question = """\ + Question: For both HypoA and HypoB, what are the different variables found in the hypotheses? \ + Return your answer as a JSON object in the following format: + ```json + {{ + "sizeA": num of variables used in HypoA + "sizeB": num of variables used in HypoB + "intersection": num of variables common in HypoA and HypoB. Use *fuzzy matching* to determine intersection, accounting for paraphrases or slightly different surface forms + "explanation": a short text explanation about the variables + }}``` + Answer:""" + num_tokens = 512 + num_retries = 1 + json_response = True + elif dimension == 'rel': + dimension_question = """\ + Question: Does HypoB exhibit the same relation as HypoA? + Compare using following example hierarchy of relationships (based on specificity): \ + "there exists a relationship" > "positive relationship" > "positive AND (linear OR quadratic)" > "positive AND linear". + Options: A) very similar B) similar but general than HypoA C) different + Return your answer as a JSON object in the following format: + ```json + {{ + "answer": one of the options from A) very similar B) similar but general than HypoA C) different + "explanation": a short text explanation about the relationship comparison + }}``` + Answer:""" + num_tokens = 512 + num_retries = 1 + json_response = True + + datasets_json = prepare_dataset_metadata_json( + dataset_meta, dataset_type=dataset_type, use_column_metadata=use_column_metadata + ) + + dimension_question_str = f"""\ + You are going to compare two natural-language hypotheses HypoA and HypoB accompanied with optional workflows: WorkflowA for HypoA and WorkflowB for HypoB. \ + Both the hypotheses answer the natural language query "QUERY" over the dataset(s) described by dataset description(s) and column description(s) below. \ + Compare HypoA and HypoB in terms of three aspects: Contexts, Variables, and Relations. \ + E.g., for the hypothesis "From 1995 to 2009, the number of sandhill cranes around the tundra (Indigilka River) surged by an astounding ~10X": + * Contexts refer to stratification of the data under which the given hypothesis is True. E.g., "For all women", "From 1995 to 2009". + * Variables refer to the set of variables (either dependent or independent) that are mentioned in the hypothesis. E.g., number of sandhill cranes, location. + * Relations refer to the form of relation between the variables. E.g., "surged by ~10x". + + Answer following questions for a given pair of hypotheses, HypoA and HypoB, along with an explanation grounded on the QUERY and the DATASET(S). 
+ + Here is the metadata for the task: + ```json + {{ + "datasets": {datasets_json}, + "query": {query}, + "HypoA": {gold_hypo}, + "WorkflowA": {gold_workflow}, + "HypoB": {gen_hypo}, + "WorkflowB": {gen_workflow} + }} + ``` + + {dimension_question}""" + + messages.append({'role': 'user', 'content': dimension_question_str}) + for retry in range(num_retries): + response = run_chatgpt_query_multi_turn( + messages=messages, + model_name=llm_used, + max_tokens=num_tokens, + temperature=0, # 0 for greedy best decoding + json_response=json_response, + ) + if response is not None: # COMMENT: changed from != to is not + break + + if response is not None: # COMMENT: changed from != to is not + answer = response.choices[0].message.content.strip() + score = get_score_from_answer(type=dimension, answer=answer) + + return dimension_question, answer, score + + +def prepare_dataset_metadata_json(dataset_meta, dataset_type, use_column_metadata=True): + if dataset_meta is None: # COMMENT: changed from == to is None + return [ + { + 'dataset_description': '', + 'columns': [], + } + ] + datasets_json = [] + if dataset_type == 'real': + for d in dataset_meta['datasets']: + datasets_json.append( + { + 'dataset_description': d['description'], + 'columns': [ + {'name': col['name'], 'description': col['description']} + for col in d['columns']['raw'] + ] + if use_column_metadata + else [], + } + ) + else: + for d in dataset_meta['datasets']: + datasets_json.append( + { + 'dataset_description': d['description'], + 'columns': [ + {'name': col['name'], 'description': col['description']} + for col in d['columns'] + ] + if use_column_metadata + else [], + } + ) + return datasets_json + + +def get_sub_hypotheses( + query, + hypo, + workflow, + dataset_meta, + llm_used, + dataset_type, + use_column_metadata=True, +): + client = OpenAI() + extraction_prompt = """\ + Given a set of dataset columns, a ground-truth hypothesis, and the analysis workflow used, your task is to extract three dimensions that define the hypothesis: Context, Variables, and Relations. \ + Here are the definitions for these dimensions: + - Contexts: Boundary conditions that limit the scope of a hypothesis. E.g., “for men over \ + the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then extract the context from the dataset_descrption. + - Variables: Known concepts that interact in a meaningful way under a given context to \ + produce the hypothesis. E.g., gender, age, income, or "None" if there is no interacting variable. + - Relations: Interactions between a given set of variables under a given context to produce \ + the hypothesis. E.g., “quadratic relationship”, “inversely proportional”, piecewise conditionals, \ + or "None" if there is no interacting relationship. + Make sure to only use the information present in the hypothesis and the workflow. Do not add any new information. \ + For each dimension, be specific, and do not omit any important details. + + Here is the metadata for the task: + ```json + { + "datasets": %s, + "hypothesis": "%s", + "workflow": "%s" + } + ``` + + Return your answer as a JSON object in the following format: + ```json + { + "sub_hypo": [ + { + "text": the hypothesis in natural language, + "context": a short text description of the context of the hypothesis, + "variables": a list of columns involved in the hypothesis, + "relations": a short text description of the relationship between the variables of the hypothesis + }, + ... 
+ ] + }``` + """ + datasets_json = prepare_dataset_metadata_json( + dataset_meta, dataset_type, use_column_metadata=use_column_metadata + ) + _prompt = extraction_prompt % (datasets_json, hypo, workflow) + sub_hypo_json = get_response(client, _prompt, model=llm_used, max_retry=1) + + if sub_hypo_json is not None: # COMMENT: changed from != to is not + # print(f"full hypothesis: {hypo}") + print(f'sub_hypo_json: {sub_hypo_json}') + else: + sub_hypo_json = { + 'sub_hypo': [], + } + + sub_hypo_json['full_hypo'] = hypo + + return sub_hypo_json + + +def match_context_with_gpt( + gold_hyp, gold_context, pred_hyp, pred_context, model='gpt-3.5-turbo' +): + prompt = f"""\ + Given a gold hypothesis, a gold context, a predicted hypothesis, and a predicted context, your task is \ + to determine if the predicted context semantically matches the ground-truth context. \ + Here is the definition for Context: Boundary conditions that limit the scope of a sub-hypothesis. E.g., “for men over the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then the context is derived from the dataset_descrption. \ + Here is the definition for Context: Boundary conditions that limit the scope of a sub-hypothesis. E.g., “for men over the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then the context is derived from the dataset_descrption. \ + If the predicted context matches the gold context, return true, otherwise return false. + If both gold and predicted hypotheses are defined over the context of the full dataset, then also return true. + If both gold and predicted hypotheses are defined over the context of the full dataset, then also return true. + + Here is the metadata for the task: + ```json + {{ + "gold_hypothesis": "{gold_hyp}", + "gold_context": "{gold_context}", + "predicted_hypothesis": "{pred_hyp}", + "predicted_context": "{pred_context}" + }} + ``` + + Return your answer as a JSON object in the following format: + ```json + {{ + "match": true or false + }} + ```""" + + client = OpenAI() + output = get_response(client, prompt, model=model) + return output.get('match', False) + + +def is_matching_context(gold_hyp, gold_context, pred_hyp, pred_context, llm_used): + if gold_context == pred_context: + return True + if 'None' in [gold_context, pred_context]: + return False + return match_context_with_gpt( + gold_hyp, gold_context, pred_hyp, pred_context, model=llm_used + ) + + +def run_eval_gold_vs_gen_NL_subhypo( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + context_score, + dataset_type, + use_column_metadata=True, +): + # GPT-4 based evaluation to evaluate generated hypothesis in terms of context, variables, relation + + eval_rec = { + 'query': query, + 'HypoA': gold_hypo, + 'WorkflowA': gold_workflow, + 'HypoB': gen_hypo, + 'WorkflowB': gen_workflow, + } + + for dimension in ['var', 'rel']: + question, answer, score = ask_dimension_question( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dimension=dimension, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + + eval_rec[dimension] = {'question': question, 'answer': answer, 'score': score} + + eval_rec['context'] = context_score + eval_rec['accuracy_score'] = ( + 1.0 + * eval_rec['context']['score'] + * eval_rec['var']['score']['f1'] + * eval_rec['rel']['score'] + ) + + return eval_rec + + +def run_eval_gold_vs_gen_NL_hypo_workflow( + query, + gold_hypo, + gold_workflow, + 
gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dataset_type, + use_column_metadata=True, +): + # Input: Dataset Metadata, Query, Gold {Hg, Wg}, Predicted {Hp, Wp} + # Output: eval_rec json includes final_score + + # Procedure: + # Dataset Metadata, Query, Gold {Hg, Wg}, Pred {Hg, Wg} + # Gold: [Hg1, Hg2] (compute on the fly) Hg1 is a NL form of subhypothesis + # Predicted: [Hp1, Hp2] (compute on the fly) + + # Compute Intersection: [(Hg_i, Hp_j), …] # tuples of (gold,pred) that matched with context (do this w/o explicit extraction) + # # filter so that a gold context and a predicted context are only attached to one tuple + # Compute recall_context (programmatically) + + # r_v_list = [] + # For (Hg_i, Hp_j) in the intersection: + # With Hg_i, Hp_j in NL, ask GPT4 → #variables and #intersection and a paragraph explanation and programmatically calculate f1_v + # Hg_i, Hp_j in NL, ask GPT4 → matching score (0, 0.5 or 1) : A) very similar B) similar but general than HypoA C) different + explanation + # r_v_list ← f1_v * score_r + # accuracy_score = mean(r_v_list) + # score = [ recall_context * mean over predicted context(context_score * var_score *rel_score )] + + # recall_context = 1.0 # COMMENT: never used + eval_rec = { + 'query': query, + 'HypoA': gold_hypo, + 'WorkflowA': gold_workflow, + 'HypoB': gen_hypo, + 'WorkflowB': gen_workflow, + } + + gold_sub_hypo_json = get_sub_hypotheses( + query=query, + hypo=gold_hypo, + workflow=gold_workflow, + dataset_meta=dataset_meta, + llm_used=llm_used, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + if len(gold_sub_hypo_json['sub_hypo']) == 0: + gold_sub_hypo_json['sub_hypo'] = [ + { + 'text': gold_hypo, + 'context': 'None', + 'variables': [], + 'relations': '', + 'explanation': 'unable to segment', + } + ] + print(f'gold_sub_hypo_json: {gold_sub_hypo_json}') + + gen_sub_hypo_json = get_sub_hypotheses( + query=query, + hypo=gen_hypo, + workflow=gen_workflow, + dataset_meta=dataset_meta, + llm_used=llm_used, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + if len(gen_sub_hypo_json['sub_hypo']) == 0: + gen_sub_hypo_json['sub_hypo'] = [ + { + 'text': gen_hypo, + 'context': 'None', + 'variables': [], + 'relations': '', + 'explanation': 'unable to segment', + } + ] + print(f'gen_sub_hypo_json: {gen_sub_hypo_json}') + + eval_rec['gold_sub_hypo'] = gold_sub_hypo_json + eval_rec['gen_sub_hypo'] = gen_sub_hypo_json + + gold_subh_covered = [] + gen_subh_to_gold_subh = dict() + gen_gold_subh_to_context = dict() + + for p_id, gen_subh in enumerate(gen_sub_hypo_json['sub_hypo']): + gen_subh_to_gold_subh[p_id] = -1 + + for g_id, gold_subh in enumerate(gold_sub_hypo_json['sub_hypo']): + if g_id in gold_subh_covered: + continue + + # match context + context_bool = is_matching_context( + gold_subh['text'], + gold_subh.get('context', ''), + gen_subh['text'], + gen_subh.get('context', ''), + llm_used, + ) + if context_bool: + context_score = 1.0 + else: + context_score = 0.0 + + if context_score == 1.0: # match only when context_score = 1.0 + gen_subh_to_gold_subh[p_id] = g_id + gold_subh_covered.append(g_id) + gen_gold_subh_to_context[f'P{p_id}||G{g_id}'] = { + 'question': f"""Comapring: GoldH: {gold_subh["text"]}, GoldC: {gold_subh['context']}\nGenH: {gen_subh['text']}, GenC: {gen_subh['context']}""", + 'answer': context_bool, + 'score': context_score, + } + break + + print(f'gen_subh_to_gold_subh: {gen_subh_to_gold_subh}') + eval_rec['gen_subh_to_gold_subh'] = gen_subh_to_gold_subh + 
eval_rec['gold_subh_covered'] = gold_subh_covered + matched_gold_gen_subh_evals = dict() + sum_accuracy_score = 0.0 + for p_id, g_id in gen_subh_to_gold_subh.items(): + if g_id >= 0: + key = f'P{p_id}||G{g_id}' + context_score = gen_gold_subh_to_context[key] + subh_eval_rec = run_eval_gold_vs_gen_NL_subhypo( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + context_score, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + sum_accuracy_score += subh_eval_rec['accuracy_score'] + matched_gold_gen_subh_evals[key] = subh_eval_rec + + eval_rec['matched_gold_gen_subh_evals'] = matched_gold_gen_subh_evals + eval_rec['recall_context'] = ( + len(gold_subh_covered) / len(gold_sub_hypo_json['sub_hypo']) + if len(gold_sub_hypo_json['sub_hypo']) + else 0.0 + ) + mean_accuracy_score = ( + sum_accuracy_score / len(gen_subh_to_gold_subh) + if len(gen_subh_to_gold_subh) + else 0.0 + ) + eval_rec['mean_accuracy_score'] = mean_accuracy_score + final_score = eval_rec['recall_context'] * mean_accuracy_score + eval_rec['final_score'] = final_score + print(f'eval_rec: {json.dumps(eval_rec, indent=2)}') + + return eval_rec diff --git a/evaluation/discoverybench/eval_utils/lm_utils.py b/evaluation/discoverybench/eval_utils/lm_utils.py new file mode 100644 index 000000000000..10486ee82294 --- /dev/null +++ b/evaluation/discoverybench/eval_utils/lm_utils.py @@ -0,0 +1,64 @@ +import os +import sys +import time + +from openai import OpenAI +from tenacity import ( + retry, + stop_after_attempt, # type: ignore + wait_random_exponential, # type: ignore +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + + +Model = Literal['gpt-4', 'gpt-3.5-turbo', 'text-davinci-003'] + +OpenAI.api_key = os.getenv('OPENAI_API_KEY') +OPENAI_GEN_HYP = { + 'temperature': 0, + 'max_tokens': 250, + 'top_p': 1.0, + 'frequency_penalty': 0, + 'presence_penalty': 0, +} + + +@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) +def run_chatgpt_query_multi_turn( + messages, + model_name='gpt-4-turbo', # pass "gpt4" for more recent model output + max_tokens=256, + temperature=0.0, + json_response=False, +): + response = None + num_retries = 3 + retry = 0 + while retry < num_retries: + retry += 1 + try: + client = OpenAI() + + if json_response: + response = client.chat.completions.create( + model=model_name, + response_format={'type': 'json_object'}, + messages=messages, + **OPENAI_GEN_HYP, + ) + else: + response = client.chat.completions.create( + model=model_name, messages=messages, **OPENAI_GEN_HYP + ) + break + + except Exception as e: + print(e) + print('GPT error. Retrying in 2 seconds...') + time.sleep(2) + + return response diff --git a/evaluation/discoverybench/eval_utils/openai_helpers.py b/evaluation/discoverybench/eval_utils/openai_helpers.py new file mode 100644 index 000000000000..95ab23cf9c2e --- /dev/null +++ b/evaluation/discoverybench/eval_utils/openai_helpers.py @@ -0,0 +1,190 @@ +import json + + +def OPENAI_TOPIC_GEN_MESSAGES(n=10): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given `n`, come up with a list of `n` distinct topics and their descriptions. The topics can be absolutely anything. Be as creative as possible. Return your answer as a JSON object. 
\n\nFor example, for `n`=3, a valid answer might be:\n```json\n{{"topics": [\n {{"id": 1, "topic": "cooking", "description": "Related to recipes, ingredients, chefs, etc."}},\n {{"id": 2, "topic": "sports", "description": "Related to players, stadiums, trophies, etc."}},\n {{"id": 3, "topic": "antiquing", "description": "Related to unique items, history, etc."}}\n]}}```\n\nNow, give me a list for `n`={n}. Remember, pick diverse topics from everything possible. No consecutive topics should be broadly similar. Directly respond with the answer JSON object.', + }, + ] + + +OPENAI_GEN_HYP = { + 'temperature': 1.0, + 'max_tokens': 4096, + 'top_p': 1.0, + 'frequency_penalty': 0, + 'presence_penalty': 0, +} + + +def OPENAI_SEMANTICS_GEN_MESSAGES(dependent, relationship, domain, domain_desc): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given the true relationship in a dataset and a given domain, your task is to come up with an interpretation of some real-world concepts that the relationship could be modeling from the provided domain. It\'s okay to be wrong, but suggest something reasonable. Try as much as possible to make sure that the TARGET is actually derivable from the other variables. Give your answer as a JSON object. Here\'s an example:\n\nRelationship for x2 = "(96.4 * x1 ** 3) + (88.72 * x5 ** 2) + (81.96 * x6 ** -2) + (28.13 * x3) + (97.0) + (0 * x4)"\nDomain="Sales"\nDomain description="Related to product distribution, revenues, marketing, etc."\n\nBased on this, the following real-world concepts might be applicable:\n```json\n{{\n "dependent": "x2",\n "relationship": "(96.4 * x1 ** 3) + (88.72 * x5 ** 2) + (81.96 * x6 ** -2) + (28.13 * x3) + (97.0) + (0 * x4)",\n "domain": "Sales",\n "trends": {{\n "x1": "Positive, cubic factor",\n "x2": "TARGET",\n "x3": "Positive, linear factor",\n "x4": "No relation",\n "x5": "Positive quadratic factor",\n "x6": "Positive, inverse quadratic factor"\n }},\n "interpretation": {{\n "x2": {{"description": "Volume of product sales by area", "name": "sales_area", "is_target": true}},\n "x1": {{"description": "Population by area", "name": "pop_area"}},\n "x3": {{"description": "Advertising spending", "name": "ad_spend"}},\n "x4": {{"description": "Gender ratio of marketing team", "name": "gdr_ratio_mkt_team"}},\n "x5": {{"description": "Intensity of marketing campaign", "name": "mkt_intensity"}}\n }},\n "x6": {{"description": "Distance to distribution center", "name": "dist_to_distr_ctr"}}\n}}```\n\nHere\'s a new test question:\nRelationship for {dependent} = "{relationship}"\nDomain = "{domain}"\nDomain description="{domain_desc}"\n\nRespond only with the answer JSON. Make sure that you do not forget to include the TARGET variable in the interpretation object.', + }, + ] + + +def OPENAI_SEMANTICS_GEN_W_MAP_MESSAGES( + dependent, relationship, domain, domain_desc, mapping +): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given a partial mapping from variables to real-world concepts and a true relationship in a dataset, your task is to come up with an interpretation of real-world concepts for the variables without any assigned mapping (those starting with x). Suggest something reasonable. 
The dependent variable must be derivable only from the other variables in the dependent relationship. Give your answer as a JSON object. Here\'s an example:\n\nExample partial mapping and relationship:\n```json\n{{\n "domain": "Sales",\n "domain_description": "Related to product distribution, revenues, marketing, etc.",\n "variable_mapping": {{\n "x1": {{"description": "Population by area", "name": "pop_area"}},\n "x2": {{"description": "Volume of product sales by area", "name": "sales_area"}},\n "x4": {{"description": "Gender ratio of marketing team", "name": "gdr_ratio_mkt_team"}},\n "x6": {{"description": "Distance to distribution center", "name": "dist_to_distr_ctr"}}\n }},\n "dependent_variable": "sales_area",\n "dependent_relationship": "(96.4 * pop_area ** 3) + (88.72 * x5 ** 2) + (81.96 * dist_to_distr_ctr ** -2) + (28.13 * x3) + (97.0)"\n}}```\nBased on this, an example answer would be:\n```json\n{{\n "dependent_variable": "sales_area",\n "missing_mapping": ["x3", "x5"],\n "trends": {{\n "x3": "Positive, linear factor",\n "x5": "Positive quadratic factor"\n }},\n "interpretation": {{\n "x3": {{"description": "Advertising spending", "name": "ad_spend"}},\n "x5": {{"description": "Intensity of marketing campaign", "name": "mkt_intensity"}}\n }}\n}}```\n\nHere\'s a new test question:\n```json\n{{\n "domain": "{domain}",\n "domain_description": "{domain_desc}",\n "variable_mapping": {json.dumps(mapping, indent=2)},\n "dependent_variable": "{dependent}",\n "dependent_relationship": "{relationship}"\n}}```\nRespond only with the answer JSON.', + }, + ] + + +def OPENAI_SEMANTICS_GEN_SUMMARY_MESSAGES(dataset): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given the following descriptions of the columns of a dataset, your task is to come up with a natural language overview of the dataset, which should include (1) what the dataset is about, (2) how the data was collected, (3) when the data was collected, and (3) for what purpose the data was collected. Be specific and creative.\n\nExample dataset:\n```json\n{{ \n "dataset": {{ \n "x6": {{"description": "Ancient artifact significance score", "name": "artifact_significance_score", "is_target": true}},\n "x1": {{"description": "Distance to ancient city center", "name": "dist_to_ancient_city_ctr"}},\n "x2": {{"description": "Quantity of discovered relics", "name": "relic_discovery_qty"}},\n "x3": {{"description": "Years since last archaeological expedition", "name": "years_since_exp"}},\n "x4": {{"description": "Number of artifacts in excavation site", "name": "artifact_qty"}},\n "x5": {{"description": "Soil fertility coefficient", "name": "soil_fertility_coef"}},\n "x7": {{"description": "Distance to ancient burial grounds", "name": "dist_to_burial_grounds"}},\n "x8": {{"description": "Population estimate of ancient civilization", "name": "ancient_civilization_pop_estimate"}},\n "x9": {{"description": "Temperature variation in excavation region", "name": "temp_variation"}}\n }}\n}}```\nExample description:\nThis dataset is about archaeological explorations and findings linked to ancient civilizations. The data was collected in the form of field metrics during various archaeological expeditions during the late mid-20th century. 
The purpose of the data collection is to evaluate the significance of ancient artifacts discovered during excavations.\n\nHere is a new test dataset.\n{json.dumps(dataset, indent=2)}\nProvide only the description.', + }, + ] + + +def OPENAI_GEN_HYPO_MESSAGES(dataset): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given a dataset with its descriptions and the true functional relationship between its variables, your task is to generate 3 levels of hypotheses for the stated relationship in plain English. The three levels are "broad", "medium" and "narrow". Make sure that the hypotheses sound natural. *Only include concepts for variables that are present in the provided functional relationship.* Give your answer as a JSON.\n\nFor example, an example dataset might be the following:\n```json\n{{\n "domain": "cybersecurity",\n "summary": "This dataset is about measuring cybersecurity threats in a system. The data was collected by monitoring various cybersecurity metrics in a network environment. The purpose of the data collection is to assess and predict potential cybersecurity risks and vulnerabilities.",\n "variables": [\n {{\n "description": "Level of cybersecurity threat",\n "name": "cybersecurity_threat",\n "is_target": true\n }},\n {{\n "description": "Number of failed login attempts",\n "name": "failed_login_attempts"\n }},\n {{\n "description": "Amount of encrypted data",\n "name": "encrypted_data"\n }},\n {{\n "description": "Frequency of software updates",\n "name": "software_updates"\n }},\n {{\n "description": "Number of antivirus software installed",\n "name": "antivirus_software"\n }},\n {{\n "description": "Quality of firewall protection",\n "name": "firewall_quality"\n }}\n ],\n "relationship": {{\n "dependent": "cybersecurity_threat",\n "relation": "-53.5*encrypted_data**2 - 53.85*failed_login_attempts**2 + 67.75*firewall_quality - 92.16 - 36.68/software_updates**3"\n }}\n}}```\nGiven this dataset, the following is a valid answer:\n```json\n{{\n "broad": {{\n "instruction": "Be vague. Only indicate which concepts might be related but not how they are related",\n "hypothesis": "Threat to cybersecurity is influenced by several factors including the amount of encrypted data, the number of failed login attempts, the quality of the firewall, as well as how often the software is updated."\n }},\n "medium": {{\n "instruction": "Be slightly more specific. For each factor, indicate carefully whether it positively or negatively affects the relationship, but do not indicate what the exponent is.",\n "hypothesis": "Cybersecurity threat tends to decrease with the amount of data encryption, the number of failed login attempts, as well as the frequency of software updates to some extent, while improvement in the firewall quality has a positive effect."\n }},\n "narrow": {{\n "instruction": "Be specific. Communicate the concepts, whether there is a positive or negative effect (be careful), and the meaning of the exponent",\n "hypothesis": "The threat to cybersecurity interacts in a complex manner with various factors. As the amount of encrypted data increases, there is a quadratic decrease in threat. Similarly for the number of failed login attempts, there is a negative quadratic relationship. The quality of the firewall protection on the other hand demonstrates a positive and linear relationship. 
Finally, the frequency of software updates has an inverse cubic relationship to the threat."\n }},\n}}\n```\n\nBased on this, provide an answer for the following test dataset:\n```json\n{dataset}```\nRespond only with a JSON.', + }, + ] + + +def create_prompt(usr_msg): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + {'role': 'user', 'content': usr_msg}, + ] + + +def get_response(client, prompt, max_retry=5, model='gpt-3.5-turbo', verbose=False): + n_try = 0 + while n_try < max_retry: + response = client.chat.completions.create( + model=model, messages=create_prompt(prompt), **OPENAI_GEN_HYP + ) + + # COMMENT: changed from + # response.choices[0].message.content.strip().strip('```json').strip('```') + content = response.choices[0].message.content + cleaned_content = content.split('```json')[1].split('```')[0].strip() + output = cleaned_content + try: + response_json = json.loads(output) + return response_json + except ValueError: + if verbose: + print(f'Bad JSON output:\n\n{output}') + n_try += 1 + if n_try < max_retry: + if verbose: + print('Retrying...') + else: + if verbose: + print('Retry limit reached') + return None + + +def get_code_fix( + client, code, error, max_retry=5, model='gpt-3.5-turbo', verbose=False +): + prompt = f"""\ +Given the following code snippet and error message, provide a single-line fix for the error. \ +Note that the code is going to be executed using python `eval`. \ +The code should be executable and should not produce the error message. Be as specific as possible. + +Here's the code and the error: +{{ + "code": "{code}", + "error": "{error}" +}} + +Return only a JSON object with the fixed code in the following format: +```json +{{ + "fixed_code": "..." +}}""" + response = get_response( + client, prompt, max_retry=max_retry, model=model, verbose=verbose + ) + return response + + +def get_new_hypothesis( + client, target, old, expr, cols, model='gpt-3.5-turbo', verbose=False +): + prompt = f"""\ +Given a target column from a dataset, a pandas expression to derive the column from existing columns, a list of \ +existing columns, and a previously written hypothesis text, carefully check if the hypothesis text is consistent with \ +the pandas expression or not. If it is consistent, simply return the hypothesis as it is. If it is not consistent, \ +provide a new natural language hypothesis that is consistent with the pandas expression using only the provided \ +information. Be specific. + +Here's the information: +```json +{{ + "target_column": "{target}", + "pandas_expression": "{expr}", + "existing_columns": {json.dumps(cols, indent=4)} + "old_hypothesis": "{old}", +}}``` + +Give your answer as a new JSON with the following format: +```json +{{ + "hypothesis": "..." +}}""" + response = get_response(client, prompt, model=model, verbose=verbose) + return response + + +def replace_variable(client, expr, old, new, model='gpt-3.5-turbo', verbose=False): + prompt = f"""\ +Given a pandas "expression", replace mentions of the "old" column with its "new" value such that the resultant \ +expression is equivalent to the original expression. + +Here's the information: +```json +{{ + "expression": "{expr}", + "old": "{old}", + "new": "{new}" +}}``` + +Give your answer as a new JSON with the following format: +```json +{{ + "new_expression": "..." 
+}}""" + response = get_response(client, prompt, model=model, verbose=verbose) + return response diff --git a/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py b/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py new file mode 100644 index 000000000000..a0b5438e4c8a --- /dev/null +++ b/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py @@ -0,0 +1,151 @@ +common_hypothesis_features = [ + '1-2 sentences', + 'surprising finding', + 'includes numeric concepts', + 'includes categorical concepts', + 'includes binary concepts', +] +hypothesis_features = [ + ['requires within-cluster analysis'], + ['requires across-cluster analysis'], + ['corresponds to a polynomial relationship of some columns'], + ['corresponds to a ratio between some columns'], + ['requires temporal analysis'], + ['relationship is based on descriptive statistics of some columns'], + ['requires concepts based on percentage or percentiles'], + ['relationship is only applicable to one cluster in the data and not the others'], +] + +column_features = [ + [ + 'must have one target column', + 'must have quantifiable columns', + 'must have a few categorical columns', + 'make sure the categorical column values do not contain special characters', + 'include a few distractor columns', + ] +] + +common_pandas_features = [ + 'must be executable using python `eval` to create the target column in variable `df` (pandas dataframe)', + "for e.g., df['A']**2 + 3*df['B'] + 9, np.where(df['A'] > 3, 'Yes', 'No'), etc.", + 'variables in pandas_expression must be from the existing columns listed above', + 'variables in pandas_expression must NOT contain the target column itself', +] +pandas_features = [ + ['expression is a quadratic polynomial'], + ['expression is a cubic polynomial'], + ['expression is a ratio of existing columns'], + ['expression is derived through logical combination of existing columns'], + # workflow +] +pandas_features = [common_pandas_features + p for p in pandas_features] + +common_derived_features = [ + '1-2 sentences', + 'includes numeric concepts', + 'includes categorical concepts', + 'includes binary concepts', +] +derived_features = [common_derived_features + h for h in hypothesis_features] +hypothesis_features = [common_hypothesis_features + h for h in hypothesis_features] + +PROMPT_HYP = """\ +Given a dataset topic and description, generate an interesting hypothesis based on \ +the provided instructions. Be creative and come up with an unusual finding. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis_features": %s, + "hypothesis": "..." +}``` + +Give your answer as a new JSON with the following format: +```json +{ + "hypothesis": "..." +} +```""" + +PROMPT_COL = """\ +Given a dataset topic, its description, and a true hypothesis that can be determined from it, \ +generate a list of valid columns based on the provided instructions. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis": "%s", + "column_instructions": %s, + "columns": [ + { + "col_name": "...", # should be an "_"-separated string + "description": "...", + "data_type": "...", # should be executable using python's `eval` function. 
E.g., str, float, int, bool + "data_range": {...}, # should be either {"min": ..., "max": ...} or {"values": [...]} + "is_distractor": true/false, # boolean indicating whether this is a distractor that could cause confusion during data analysis + "is_target": true/false # boolean indicating whether this is the target variable for the hypothesis; at least one column should be the target + }, + ... + ], + "pandas_instructions": %s, + "pandas_equation_for_hypothesis": { + "target_col": "...", + "target_col_type": "...", + "target_col_range": {...}, + "independent_cols_in_pandas_expression": [], # list of column names that will be used to derive the target column + "pandas_expression": "..." # expression to derive df[target_col] using df[ind_col1], df[ind_col2], etc. + } +}``` + +Give your answer as a new JSON with the "columns" and "pandas_equation_for_hypothesis" keys filled using the following format: +```json +{ + "columns": [...], + "pandas_equation_for_hypothesis": {...} +} +```""" + +PROMPT_DER = """\ +Given a dataset topic, description, a true hypothesis that can be determined from the data, \ +and a target column from the dataset, generate a hypothesis for the target column using new independent columns not present in the existing columns. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis": "%s", + "existing_columns": %s, + "target_column": "%s", + "new_to_target_instructions": %s, + "new_to_target_hypothesis": "...", # describe a relationship between new columns that explains the target column + "new_columns_for_target": [ # do not repeat any of the existing columns in the dataset + { + "col_name": "...", # should be an "_"-separated string + "description": "...", + "data_type": "...", # should be executable using python's `eval` function. E.g., str, float, int, bool + "data_range": {...}, # should be either {"min": ..., "max": ...} or {"values": [...]} + }, + ... + ], + "pandas_instructions": %s, + "pandas_equation_for_new_to_target_hypothesis": { + "target_col": "...", + "target_col_type": "...", + "target_col_range": {...}, + "independent_cols_in_pandas_expression": [], # list of column names from new_columns_for_target that will be used to derive target_col + "pandas_expression": "..." # expression to derive df[target_col] using df[ind_col1], df[ind_col2], etc. 
+ } +}``` + +Give your answer as a new JSON with the "new_to_target_hypothesis", "new_columns_for_target", and \ +"pandas_equation_for_new_to_target_hypothesis" keys filled using the following format: +```json +{ + "new_to_target_hypothesis": "...", + "new_columns_for_target": [...], + "pandas_equation_for_new_to_target_hypothesis": {...} +} +```""" diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index 0566b56d20e0..700cb3dcbcda 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -5,6 +5,9 @@ import git import pandas as pd +from evaluation.discoverybench.eval_utils.eval_w_subhypo_gen import ( + run_eval_gold_vs_gen_NL_hypo_workflow, +) from evaluation.discoverybench.eval_utils.response_parser import ( extract_gen_hypo_from_logs, ) @@ -31,6 +34,8 @@ from openhands.runtime.base import Runtime from openhands.utils.async_utils import call_async_from_sync +EVALUATION_LLM = 'gpt-4-1106-preview' + DATA_FILES = {} LIBRARIES = [ @@ -297,7 +302,19 @@ def process_instance( # remove when it becomes unnecessary histories = state.history.compatibility_for_eval_history_pairs() - # TODO: add discoverybench evaluation + # DiscoveryBench Evaluation + eval_rec = run_eval_gold_vs_gen_NL_hypo_workflow( + query=instance.query, + gold_hypo=instance.gold_hypo, + gold_workflow='', + gen_hypo=test_result['gen_hypo'], + gen_workflow='', + dataset_meta=instance.dataset_metadata, + llm_used=EVALUATION_LLM, + dataset_type='real', + ) + + test_result['eval_rec'] = eval_rec output = EvalOutput( instance_id=str(instance.instance_id), From 23a8027f7ddbf05963db6148285a60b80a62c3b0 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 15:09:41 +0530 Subject: [PATCH 10/15] feat(eval): add run_infer.sh to execute inference Signed-off-by: Abhijeetsingh Meena --- .../discoverybench/scripts/run_infer.sh | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 evaluation/discoverybench/scripts/run_infer.sh diff --git a/evaluation/discoverybench/scripts/run_infer.sh b/evaluation/discoverybench/scripts/run_infer.sh new file mode 100644 index 000000000000..8b9fffd7c579 --- /dev/null +++ b/evaluation/discoverybench/scripts/run_infer.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -eo pipefail + +source "evaluation/utils/version_control.sh" + +MODEL_CONFIG=$1 +COMMIT_HASH=$2 +AGENT=$3 +EVAL_LIMIT=$4 +NUM_WORKERS=$5 + +if [ -z "$NUM_WORKERS" ]; then + NUM_WORKERS=1 + echo "Number of workers not specified, use default $NUM_WORKERS" +fi + +# ################################################################################ + +checkout_eval_branch + +if [ -z "$AGENT" ]; then + echo "Agent not specified, use default CodeActAgent" + AGENT="CodeActAgent" +fi + +get_agent_version + +echo "AGENT: $AGENT" +echo "AGENT_VERSION: $AGENT_VERSION" +echo "MODEL_CONFIG: $MODEL_CONFIG" + +COMMAND="poetry run python evaluation/discoverybench/run_infer.py \ + --agent-cls $AGENT \ + --llm-config $MODEL_CONFIG \ + --max-iterations 10 \ + --max-chars 10000000 \ + --eval-num-workers $NUM_WORKERS \ + --eval-note $AGENT_VERSION" + +if [ -n "$EVAL_LIMIT" ]; then + echo "EVAL_LIMIT: $EVAL_LIMIT" + COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT" +fi + +# Run the command +eval $COMMAND From 0374351ddf66cd83dc233561285c6f19c4b855e0 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 16:08:46 +0530 Subject: [PATCH 11/15] feat(eval): add AgentConfig to disable function calling and enable jupyter and browsing delegate config 
Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/run_infer.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py index 700cb3dcbcda..77d72d04775a 100644 --- a/evaluation/discoverybench/run_infer.py +++ b/evaluation/discoverybench/run_infer.py @@ -22,6 +22,7 @@ ) from openhands.controller.state.state import State from openhands.core.config import ( + AgentConfig, AppConfig, SandboxConfig, get_llm_config_arg, @@ -75,6 +76,12 @@ def get_config( workspace_mount_path=None, ) config.set_llm_config(metadata.llm_config) + agent_config = AgentConfig( + function_calling=False, + codeact_enable_jupyter=True, + codeact_enable_browsing_delegate=True, + ) + config.set_agent_config(agent_config) return config From 7d35c51615550944b56116cf78b7b96cc2a5b83c Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 16:10:14 +0530 Subject: [PATCH 12/15] chore(eval): set execute permission for run_infer.sh Signed-off-by: Abhijeetsingh Meena --- evaluation/discoverybench/scripts/run_infer.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 evaluation/discoverybench/scripts/run_infer.sh diff --git a/evaluation/discoverybench/scripts/run_infer.sh b/evaluation/discoverybench/scripts/run_infer.sh old mode 100644 new mode 100755 From 7f3ab9874ed3d51560247179598b69ea71df1491 Mon Sep 17 00:00:00 2001 From: Abhijeetsingh Meena Date: Wed, 30 Oct 2024 16:23:52 +0530 Subject: [PATCH 13/15] docs(eval): update README to comply with linting rules --- evaluation/discoverybench/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/evaluation/discoverybench/README.md b/evaluation/discoverybench/README.md index 9b5d5df495c7..a0d8994709df 100644 --- a/evaluation/discoverybench/README.md +++ b/evaluation/discoverybench/README.md @@ -1,6 +1,6 @@ # DiscoveryBench with OpenHands -[DiscoveryBench](https://github.com/allenai/discoverybench/) [(Paper)](https://arxiv.org/abs/2407.01725v1) contains 264 tasks collected across 6 diverse domains, such as biology, economics, and sociology. It incorporates discovery workflows from published papers to approximate the real-world challenges faced by researchers. +[DiscoveryBench](https://github.com/allenai/discoverybench/) [(Paper)](https://arxiv.org/abs/2407.01725v1) contains 264 tasks collected across 6 diverse domains, such as biology, economics, and sociology. It incorporates discovery workflows from published papers to approximate the real-world challenges faced by researchers.

@@ -35,4 +35,3 @@ When the `run_infer.sh` script is started, it will automatically pull the latest - `AGENT`: Use CoderActAgent, right now it only supports that. - `EVAL_LIMIT`: Number of samples to evaluate. - `NUM_WORKERS`: Number of workers to parallelize the evaluation process. - From 75ee54bbc5b062e97e5c255cec80cf2061323051 Mon Sep 17 00:00:00 2001 From: tofarr Date: Wed, 30 Oct 2024 08:14:28 -0600 Subject: [PATCH 14/15] Increase share popup duration from 5s to 10s (#4625) Co-authored-by: openhands --- frontend/src/components/feedback-modal.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/feedback-modal.tsx b/frontend/src/components/feedback-modal.tsx index 53d0a0dd37a4..f9cf05f0789c 100644 --- a/frontend/src/components/feedback-modal.tsx +++ b/frontend/src/components/feedback-modal.tsx @@ -60,7 +60,7 @@ export function FeedbackModal({ Password: {password} (copy) , - { duration: 5000 }, + { duration: 10000 }, ); }; From e21abce7860d26d9e0880d4dc4d9bd1e2aeab80b Mon Sep 17 00:00:00 2001 From: Robert Brennan Date: Wed, 30 Oct 2024 10:27:25 -0400 Subject: [PATCH 15/15] Load GitHub users list at startup for improved authentication performance (#4567) Co-authored-by: openhands --- openhands/server/listen.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/openhands/server/listen.py b/openhands/server/listen.py index 3115943627ee..c3a638534531 100644 --- a/openhands/server/listen.py +++ b/openhands/server/listen.py @@ -68,6 +68,21 @@ GITHUB_CLIENT_ID = os.getenv('GITHUB_CLIENT_ID', '').strip() GITHUB_CLIENT_SECRET = os.getenv('GITHUB_CLIENT_SECRET', '').strip() +# New global variable to store the user list +GITHUB_USER_LIST = None + + +# New function to load the user list +def load_github_user_list(): + global GITHUB_USER_LIST + waitlist = os.getenv('GITHUB_USER_LIST_FILE') + if waitlist: + with open(waitlist, 'r') as f: + GITHUB_USER_LIST = [line.strip() for line in f if line.strip()] + + +load_github_user_list() + @asynccontextmanager async def lifespan(app: FastAPI): @@ -836,22 +851,14 @@ class User(BaseModel): @app.post('/api/authenticate') def authenticate(user: User | None = None): - waitlist = os.getenv('GITHUB_USER_LIST_FILE') + global GITHUB_USER_LIST # Only check if waitlist is provided - if waitlist is not None: - try: - with open(waitlist, 'r') as f: - users = f.read().splitlines() - if user is None or user.login not in users: - return JSONResponse( - status_code=status.HTTP_403_FORBIDDEN, - content={'error': 'User not on waitlist'}, - ) - except FileNotFoundError: + if GITHUB_USER_LIST: + if user is None or user.login not in GITHUB_USER_LIST: return JSONResponse( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - content={'error': 'Waitlist file not found'}, + status_code=status.HTTP_403_FORBIDDEN, + content={'error': 'User not on waitlist'}, ) return JSONResponse(