From 2fab7be207d72982b34f6e8ebeac228d0370a580 Mon Sep 17 00:00:00 2001 From: "alicja.kotyla" Date: Thu, 31 Oct 2024 08:47:13 +0100 Subject: [PATCH 01/26] add synthetic data generation pipeline --- .../evaluation/dataset-generator/generate.py | 63 +++++++++++++++ .../prompt_basic_answer_gen.py | 18 +++++ .../dataset_generator/prompt_passages_gen.py | 22 ++++++ .../dataset_generator/prompt_query_gen.py | 30 ++++++++ .../steps/dontknot_filter_step.py | 30 ++++++++ .../tasks/answer_gen_task.py | 50 ++++++++++++ .../tasks/passages_gen_task.py | 76 +++++++++++++++++++ .../dataset_generator/tasks/query_gen_task.py | 50 ++++++++++++ .../evaluate/dataset_generator/utils.py | 15 ++++ 9 files changed, 354 insertions(+) create mode 100644 examples/evaluation/dataset-generator/generate.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py new file mode 100644 index 00000000..46388b45 --- /dev/null +++ b/examples/evaluation/dataset-generator/generate.py @@ -0,0 +1,63 @@ +from distilabel.llms import OpenAILLM +from distilabel.pipeline import Pipeline +import asyncio +from ragbits.evaluate.dataset_generator.tasks.answer_gen_task import AnswerGenTask +from ragbits.evaluate.dataset_generator.tasks.passages_gen_task import PassagesGenTask +from ragbits.evaluate.dataset_generator.tasks.query_gen_task import QueryGenTask +from ragbits.document_search.ingestion.document_processor import DocumentProcessorRouter +from ragbits.document_search.documents.document import DocumentType, Document, DocumentMeta + +from ragbits.document_search.ingestion.providers.unstructured.pdf import UnstructuredPdfProvider + +from datasets import Dataset + +from pathlib import Path + +CORPUS_PATH = Path("corpus-path") +DATASET_HF_PATH = "dataset-hf-path" + + +async def main(): + router = DocumentProcessorRouter( + providers={DocumentType.PDF: UnstructuredPdfProvider(chunking_kwargs={"max_characters": 512})} + ) + document_meta = DocumentMeta.from_local_path(local_path=CORPUS_PATH) + document_processor = router.get_provider(document_meta) + elements = await document_processor.process(document_meta) + dataset = Dataset.from_dict({ + "chunk": [node.content for node in elements if node.element_type=="text"][:2] + }) + + with Pipeline("synthetic-RAG-data") as pipeline: + query_gen_task = QueryGenTask( + llm=OpenAILLM(model="gpt-4o"), + ) + + answer_gen_task = AnswerGenTask( + llm=OpenAILLM(model="gpt-4o") + ) + + passages_gen_task = PassagesGenTask( + llm=OpenAILLM(model="gpt-4o"), + ) + + # TODO: Add I don't know answer step. 
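+        # A possible shape for that step (sketch only, not wired up in this commit): drop rows
+        # whose "basic_answer" contains a refusal phrase such as "I don't know", so those rows
+        # are excluded from the final dataset. A skeleton for this filter is added below in
+        # steps/dontknot_filter_step.py.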
+ + query_gen_task >> answer_gen_task >> passages_gen_task + + distiset = pipeline.run( + use_cache=False, + dataset=dataset + ) + result = distiset["default"]["train"] + result = result.remove_columns(["distilabel_metadata", "model_name"]) + + result.push_to_hub( + DATASET_HF_PATH, + private=False, + ) + +if __name__ == "__main__": + asyncio.run(main()) + + diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py new file mode 100644 index 00000000..7a013c1f --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py @@ -0,0 +1,18 @@ +from pydantic import BaseModel +from ragbits.core.prompt import Prompt + + +class BasicAnswerGenInput(BaseModel): + chunk: str + question: str + + +class BasicAnswerGenPrompt(Prompt[BasicAnswerGenInput, str]): + + system_prompt = """You are an AI assistant to answer the given question in the provide evidence text. + Do not mention any of these in the answer: "in the given text", "in the provided information", etc. +Users do not know the passage source of the answer, so it should not be mentioned in the answer. +You can find the evidence from the given text about question, and you have to write a proper answer to the given question. +If you don't know the answer just say: I don't know.""" + + user_prompt = "Text:\n<|text_start|>\n {{ chunk }} \n<|text_end|>\n\nQuestion:\n {{ question }} \n\nAnswer:" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py new file mode 100644 index 00000000..c9a92693 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py @@ -0,0 +1,22 @@ +from pydantic import BaseModel +from ragbits.core.prompt import Prompt + + +class PassagesGenInput(BaseModel): + question: str + answer: str + chunk: str + + +class PassagesGenPrompt(Prompt[PassagesGenInput, str]): + + system_prompt = """You are an AI tasked with retrieving passages (one or many) from the provided Chunk that that contain information needed to generate the provided Answer to the given Question. + +Instructions: +1. Each Passage MUST be VERBATIM and EXACT, without any modifications +2. Please provide the response in the form of a Python list. It should begin with "[" and end with "]" +3. You MUST start your answer with "[" +4. The Chunk ALWAYS contains information needed to justify the Answer +5. Each passage must be as BRIEF as possible; DO NOT RETURN FULL SENTENCES""" + + user_prompt = "Question:\n {{ question }} \nAnswer:\n {{ answer }} \nChunk:\n {{ chunk }}\n\nPassages:" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py new file mode 100644 index 00000000..34e2081e --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py @@ -0,0 +1,30 @@ +from pydantic import BaseModel +from ragbits.core.prompt import Prompt + + +class QueryGenInput(BaseModel): + chunk: str + + +class QueryGenPrompt(Prompt[QueryGenInput, str]): + + system_prompt = """You're an AI tasked to convert Text into a factoid question. 
+Factoid questions are those seeking brief, factual information that can be easily verified. They typically require a yes or no answer or a brief explanation and often inquire about specific details such as dates, names, places, or events. + +Examples of factoid questions include: + +- What is the incoming shipment report? +- What angle should I set my ladder at? +- What documents do I need to be a proof of transaction? + +Instructions: +1. Questions MUST BE extracted from given Text +2. Questions MUST BE as SHORT a possible +3. Questions should be as detailed as possible from Text +4. Create questions that ask about factual information from the Text +5. Only return ONE question +6. Frame questions in a first-person, INFROMAL style, as if the employee is seeking advice or clarification while working +7. Do not mention any of these in the questions: "in the given text", "in the provided information", etc. +Users do not know the passage source of the question, so it should not be mentioned in the question.""" + + user_prompt = "Text: {{ chunk }}\n\nGenerated Question from the Text:\n" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py new file mode 100644 index 00000000..8723cf99 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py @@ -0,0 +1,30 @@ +from typing import Any, Dict, List + +from pydantic import Field + +from distilabel.steps.base import GeneratorStep + +def dontknow_filter_rule_based(row: dict[str, Any], dont_know_phrases: list[str]) -> bool: + + return not any( + phrase in s for phrase in dont_know_phrases for s in row["basic_answer"] + ) + +class DontKnowFilter(GeneratorStep): + + data: list[dict[str, Any]] = Field(default_factory=list, exclude=True) + dont_know_phrases: list[str] = [ + "I don't know", + "I do not know", + "don't know", + ] + + @staticmethod + def _transform_data(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # TODO + pass + + @property + def outputs(self) -> List[str]: + # TODO + pass \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py new file mode 100644 index 00000000..94584361 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py @@ -0,0 +1,50 @@ +from typing import Any + +from distilabel.steps.tasks import TextGeneration +from ragbits.core.prompt.base import ChatFormat + +from ragbits.evaluate.dataset_generator.prompt_basic_answer_gen import BasicAnswerGenPrompt, BasicAnswerGenInput + +class AnswerGenTask(TextGeneration): + """ + A task for generating basic answers to questions based on a provided text chunk. This class extends + the `TextGeneration` task from the `distilabel` package. + """ + + @property + def inputs(self) -> list[str]: + """Defines the input fields required for this task.""" + return ["chunk", "question"] + + def format_input(self, input: dict[str, Any]) -> ChatFormat: + """ + Formats the input data for the text generation model by creating a `ChatFormat` object. + + Args: + input: A dictionary containing "chunk" (text source) and "question" keys. + + Returns: + The formatted chat object containing the inputs for text generation. 
+ """ + chat = BasicAnswerGenPrompt(BasicAnswerGenInput(chunk=input["chunk"], question=input["question"])).chat + return chat + + @property + def outputs(self) -> list[str]: + """Defines the output fields generated by this task.""" + return ["basic_answer"] + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, str]: + """ + Formats the model's output into a structured dictionary with the "basic_answer" key. + + Args: + output: The raw output generated by the text generation model. + input: Optional; not typically used in this formatting. + + Returns: + A dictionary with "basic_answer" as the key and the generated output as its value. + """ + return {"basic_answer": output} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py new file mode 100644 index 00000000..598b0c7a --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py @@ -0,0 +1,76 @@ +from typing import Any + +from distilabel.steps.tasks import TextGeneration + +from ragbits.core.prompt.base import ChatFormat +from ragbits.evaluate.dataset_generator.utils import get_closest_substring, get_passages_list +from ragbits.evaluate.dataset_generator.prompt_passages_gen import PassagesGenInput, PassagesGenPrompt + +class PassagesGenTask(TextGeneration): + """ + A task for generating passages related to a specific question and answer from a text chunk. + """ + + get_matches: bool = False + + @property + def inputs(self) -> list[str]: + """Defines the input fields required for this task.""" + return ["chunk", "question", "basic_answer"] + + def format_input(self, input: dict[str, Any]) -> ChatFormat: + """ + Formats the input data for generating passages based on the provided "chunk", "question", and + "basic_answer" values. + + Args: + input: A dictionary containing "chunk", "question", and "basic_answer". + + Returns: + The formatted chat object containing the inputs for passage generation. + """ + chat = PassagesGenPrompt( + PassagesGenInput( + question=input["question"], + answer=input["basic_answer"], + chunk=input["chunk"] + ) + ).chat + return chat + + @property + def outputs(self) -> list[str]: + """Defines the output fields generated by this task.""" + return ["question", "chunk", "passages"] + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, list[str]]: + """ + Formats the model's output into a structured dictionary with "question", "chunk", and "passages". + If `get_matches` is `True`, attempts to find the closest matches for each passage within the + provided chunk. + + Args: + output: The raw output generated by the text generation model. + input: Required if `get_matches` is `True`, containing "chunk" + and "question". + + Returns: + A dictionary with "chunk", "question", and a list of "passages". 
+ """ + passages = get_passages_list(output) or [] + + if self.get_matches: + matched_passages = [] + + for passage in passages: + if passage in input["chunk"]: + matched_passages.append(passage) + else: + matched_passage = get_closest_substring(input["chunk"], passage) + matched_passages.append(matched_passage) + + return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} + + return {"chunk": input["chunk"], "question": input["question"], "passages": passages} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py new file mode 100644 index 00000000..d2e6d9e7 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py @@ -0,0 +1,50 @@ +from typing import Any + +from distilabel.steps.tasks import TextGeneration +from distilabel.steps import Step, StepInput + +from ragbits.core.prompt.base import ChatFormat +from ragbits.evaluate.dataset_generator.prompt_query_gen import QueryGenPrompt, QueryGenInput + +class QueryGenTask(TextGeneration): + """ + A task for generating a question based on a provided text chunk. + """ + + @property + def inputs(self) -> list[str]: + """Defines the input fields required for this task.""" + return ["chunk"] + + def format_input(self, input: dict[str, Any]) -> ChatFormat: + """ + Formats the input data for generating a question based on the provided "chunk". + + Args: + input: A dictionary containing a single "chunk" key with the text input. + + Returns: + The formatted chat object containing the input for query generation. + """ + chat = QueryGenPrompt(QueryGenInput(chunk=input["chunk"])).chat + return chat + + @property + def outputs(self) -> list[str]: + """Defines the output fields generated by this task.""" + return ["question", "chunk"] + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, str]: + """ + Formats the generated question into a structured dictionary with the original "chunk" input. + + Args: + output: The generated question. + input: Optional; contains "chunk" key with the original input chunk. + + Returns: + A dictionary containing "chunk" and "question". 
+ """ + return {"chunk": input["chunk"], "question": output} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py new file mode 100644 index 00000000..de4605a1 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py @@ -0,0 +1,15 @@ +import re +from difflib import SequenceMatcher +from itertools import combinations + +def get_closest_substring(long, short): + a, b = max( + combinations(re.finditer('|'.join(short.split()), long), 2), + key=lambda c: SequenceMatcher(None, long[c[0].start():c[1].end()], short).ratio() + ) + return long[a.start():b.end()] + +def get_passages_list(raw_passages: str) -> list[str]: + passages = raw_passages.split("[")[1] + passages = passages.split("]")[0] + return eval("[" + passages + "]") From ebfb43f6b37cd47101530e84368f64705a6ef97f Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Thu, 7 Nov 2024 17:57:43 +0100 Subject: [PATCH 02/26] dataset generation wip --- .../evaluation/dataset-generator/generate.py | 34 +++--- .../evaluate/dataset_generator/__init__.py | 0 .../prompt_basic_answer_gen.py | 18 --- .../dataset_generator/prompt_passages_gen.py | 22 ---- .../dataset_generator/prompt_query_gen.py | 30 ----- .../dataset_generator/prompts/__init__.py | 0 .../evaluate/dataset_generator/prompts/qa.py | 66 +++++++++++ .../dataset_generator/tasks/__init__.py | 0 .../tasks/answer_gen_task.py | 50 --------- .../tasks/filter/__init__.py | 0 .../dataset_generator/tasks/filter/base.py | 21 ++++ .../tasks/filter/dont_know.py | 23 ++++ .../tasks/passages_gen_task.py | 76 ------------- .../tasks/text_generation/__init__.py | 0 .../base.py} | 36 +++--- .../tasks/text_generation/qa.py | 104 ++++++++++++++++++ 16 files changed, 249 insertions(+), 231 deletions(-) create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/__init__.py delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/__init__.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/__init__.py delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/__init__.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/__init__.py rename packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/{query_gen_task.py => text_generation/base.py} (59%) create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py diff --git a/examples/evaluation/dataset-generator/generate.py 
b/examples/evaluation/dataset-generator/generate.py index 46388b45..f784b3d5 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -1,31 +1,28 @@ from distilabel.llms import OpenAILLM from distilabel.pipeline import Pipeline import asyncio -from ragbits.evaluate.dataset_generator.tasks.answer_gen_task import AnswerGenTask -from ragbits.evaluate.dataset_generator.tasks.passages_gen_task import PassagesGenTask -from ragbits.evaluate.dataset_generator.tasks.query_gen_task import QueryGenTask -from ragbits.document_search.ingestion.document_processor import DocumentProcessorRouter -from ragbits.document_search.documents.document import DocumentType, Document, DocumentMeta +from ragbits.evaluate.dataset_generator.tasks.text_generation.qa import AnswerGenTask, PassagesGenTask, QueryGenTask +from ragbits.evaluate.dataset_generator.tasks.filter.dont_know import DontKnowFilter -from ragbits.document_search.ingestion.providers.unstructured.pdf import UnstructuredPdfProvider from datasets import Dataset from pathlib import Path -CORPUS_PATH = Path("corpus-path") -DATASET_HF_PATH = "dataset-hf-path" +CORPUS_PATH = Path("osha3192.pdf") +DATASET_HF_PATH = "osho" async def main(): - router = DocumentProcessorRouter( - providers={DocumentType.PDF: UnstructuredPdfProvider(chunking_kwargs={"max_characters": 512})} - ) - document_meta = DocumentMeta.from_local_path(local_path=CORPUS_PATH) - document_processor = router.get_provider(document_meta) - elements = await document_processor.process(document_meta) + # router = DocumentProcessorRouter( + # providers={DocumentType.PDF: UnstructuredPdfProvider(chunking_kwargs={"max_characters": 512})} + # ) + # document_meta = DocumentMeta.from_local_path(local_path=CORPUS_PATH) + # document_processor = router.get_provider(document_meta) + # elements = await document_processor.process(document_meta) + FACTOIDS = ["Neural networks are algorithms capable of recognition and processing the data structure", "Warsaw is capital of Poland", "Ambafatima"] dataset = Dataset.from_dict({ - "chunk": [node.content for node in elements if node.element_type=="text"][:2] + "chunk": FACTOIDS }) with Pipeline("synthetic-RAG-data") as pipeline: @@ -37,13 +34,15 @@ async def main(): llm=OpenAILLM(model="gpt-4o") ) + passages_gen_task = PassagesGenTask( llm=OpenAILLM(model="gpt-4o"), ) - # TODO: Add I don't know answer step. 
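+        # Filter that drops rows whose generated "basic_answer" is an "I don't know"-style
+        # refusal; its inputs and outputs are taken from the wrapped task's outputs
+        # (here: answer_gen_task).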
+ dont_know_filter = DontKnowFilter(task=answer_gen_task) + - query_gen_task >> answer_gen_task >> passages_gen_task + query_gen_task >> answer_gen_task >> passages_gen_task >> dont_know_filter distiset = pipeline.run( use_cache=False, @@ -52,6 +51,7 @@ async def main(): result = distiset["default"]["train"] result = result.remove_columns(["distilabel_metadata", "model_name"]) + breakpoint() result.push_to_hub( DATASET_HF_PATH, private=False, diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/__init__.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py deleted file mode 100644 index 7a013c1f..00000000 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_basic_answer_gen.py +++ /dev/null @@ -1,18 +0,0 @@ -from pydantic import BaseModel -from ragbits.core.prompt import Prompt - - -class BasicAnswerGenInput(BaseModel): - chunk: str - question: str - - -class BasicAnswerGenPrompt(Prompt[BasicAnswerGenInput, str]): - - system_prompt = """You are an AI assistant to answer the given question in the provide evidence text. - Do not mention any of these in the answer: "in the given text", "in the provided information", etc. -Users do not know the passage source of the answer, so it should not be mentioned in the answer. -You can find the evidence from the given text about question, and you have to write a proper answer to the given question. -If you don't know the answer just say: I don't know.""" - - user_prompt = "Text:\n<|text_start|>\n {{ chunk }} \n<|text_end|>\n\nQuestion:\n {{ question }} \n\nAnswer:" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py deleted file mode 100644 index c9a92693..00000000 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_passages_gen.py +++ /dev/null @@ -1,22 +0,0 @@ -from pydantic import BaseModel -from ragbits.core.prompt import Prompt - - -class PassagesGenInput(BaseModel): - question: str - answer: str - chunk: str - - -class PassagesGenPrompt(Prompt[PassagesGenInput, str]): - - system_prompt = """You are an AI tasked with retrieving passages (one or many) from the provided Chunk that that contain information needed to generate the provided Answer to the given Question. - -Instructions: -1. Each Passage MUST be VERBATIM and EXACT, without any modifications -2. Please provide the response in the form of a Python list. It should begin with "[" and end with "]" -3. You MUST start your answer with "[" -4. The Chunk ALWAYS contains information needed to justify the Answer -5. 
Each passage must be as BRIEF as possible; DO NOT RETURN FULL SENTENCES""" - - user_prompt = "Question:\n {{ question }} \nAnswer:\n {{ answer }} \nChunk:\n {{ chunk }}\n\nPassages:" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py deleted file mode 100644 index 34e2081e..00000000 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompt_query_gen.py +++ /dev/null @@ -1,30 +0,0 @@ -from pydantic import BaseModel -from ragbits.core.prompt import Prompt - - -class QueryGenInput(BaseModel): - chunk: str - - -class QueryGenPrompt(Prompt[QueryGenInput, str]): - - system_prompt = """You're an AI tasked to convert Text into a factoid question. -Factoid questions are those seeking brief, factual information that can be easily verified. They typically require a yes or no answer or a brief explanation and often inquire about specific details such as dates, names, places, or events. - -Examples of factoid questions include: - -- What is the incoming shipment report? -- What angle should I set my ladder at? -- What documents do I need to be a proof of transaction? - -Instructions: -1. Questions MUST BE extracted from given Text -2. Questions MUST BE as SHORT a possible -3. Questions should be as detailed as possible from Text -4. Create questions that ask about factual information from the Text -5. Only return ONE question -6. Frame questions in a first-person, INFROMAL style, as if the employee is seeking advice or clarification while working -7. Do not mention any of these in the questions: "in the given text", "in the provided information", etc. -Users do not know the passage source of the question, so it should not be mentioned in the question.""" - - user_prompt = "Text: {{ chunk }}\n\nGenerated Question from the Text:\n" \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/__init__.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py new file mode 100644 index 00000000..23bed1b5 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py @@ -0,0 +1,66 @@ +from pydantic import BaseModel +from ragbits.core.prompt import Prompt + + +class BasicAnswerGenInput(BaseModel): + chunk: str + question: str + + +class BasicAnswerGenPrompt(Prompt[BasicAnswerGenInput, str]): + + system_prompt = """You are an AI assistant to answer the given question in the provide evidence text. + Do not mention any of these in the answer: "in the given text", "in the provided information", etc. +Users do not know the passage source of the answer, so it should not be mentioned in the answer. +You can find the evidence from the given text about question, and you have to write a proper answer to the given question. 
+If you don't know the answer just say: I don't know.""" + + user_prompt = "Text:\n<|text_start|>\n {{ chunk }} \n<|text_end|>\n\nQuestion:\n {{ question }} \n\nAnswer:" + + +class PassagesGenInput(BaseModel): + question: str + basic_answer: str + chunk: str + + +class PassagesGenPrompt(Prompt[PassagesGenInput, str]): + + system_prompt = """You are an AI tasked with retrieving passages (one or many) from the provided Chunk that that contain information needed to generate the provided Answer to the given Question. + +Instructions: +1. Each Passage MUST be VERBATIM and EXACT, without any modifications +2. Please provide the response in the form of a Python list. It should begin with "[" and end with "]" +3. You MUST start your answer with "[" +4. The Chunk ALWAYS contains information needed to justify the Answer +5. Each passage must be as BRIEF as possible; DO NOT RETURN FULL SENTENCES""" + + user_prompt = "Question:\n {{ question }} \nAnswer:\n {{ basic_answer }} \nChunk:\n {{ chunk }}\n\nPassages:" + + +class QueryGenInput(BaseModel): + chunk: str + + +class QueryGenPrompt(Prompt[QueryGenInput, str]): + + system_prompt = """You're an AI tasked to convert Text into a factoid question. +Factoid questions are those seeking brief, factual information that can be easily verified. They typically require a yes or no answer or a brief explanation and often inquire about specific details such as dates, names, places, or events. + +Examples of factoid questions include: + +- What is the incoming shipment report? +- What angle should I set my ladder at? +- What documents do I need to be a proof of transaction? + +Instructions: +1. Questions MUST BE extracted from given Text +2. Questions MUST BE as SHORT a possible +3. Questions should be as detailed as possible from Text +4. Create questions that ask about factual information from the Text +5. Only return ONE question +6. Frame questions in a first-person, INFROMAL style, as if the employee is seeking advice or clarification while working +7. Do not mention any of these in the questions: "in the given text", "in the provided information", etc. +Users do not know the passage source of the question, so it should not be mentioned in the question.""" + + user_prompt = "Text: {{ chunk }}\n\nGenerated Question from the Text:\n" diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/__init__.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py deleted file mode 100644 index 94584361..00000000 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/answer_gen_task.py +++ /dev/null @@ -1,50 +0,0 @@ -from typing import Any - -from distilabel.steps.tasks import TextGeneration -from ragbits.core.prompt.base import ChatFormat - -from ragbits.evaluate.dataset_generator.prompt_basic_answer_gen import BasicAnswerGenPrompt, BasicAnswerGenInput - -class AnswerGenTask(TextGeneration): - """ - A task for generating basic answers to questions based on a provided text chunk. This class extends - the `TextGeneration` task from the `distilabel` package. 
- """ - - @property - def inputs(self) -> list[str]: - """Defines the input fields required for this task.""" - return ["chunk", "question"] - - def format_input(self, input: dict[str, Any]) -> ChatFormat: - """ - Formats the input data for the text generation model by creating a `ChatFormat` object. - - Args: - input: A dictionary containing "chunk" (text source) and "question" keys. - - Returns: - The formatted chat object containing the inputs for text generation. - """ - chat = BasicAnswerGenPrompt(BasicAnswerGenInput(chunk=input["chunk"], question=input["question"])).chat - return chat - - @property - def outputs(self) -> list[str]: - """Defines the output fields generated by this task.""" - return ["basic_answer"] - - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, str]: - """ - Formats the model's output into a structured dictionary with the "basic_answer" key. - - Args: - output: The raw output generated by the text generation model. - input: Optional; not typically used in this formatting. - - Returns: - A dictionary with "basic_answer" as the key and the generated output as its value. - """ - return {"basic_answer": output} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/__init__.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py new file mode 100644 index 00000000..060d1ebf --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py @@ -0,0 +1,21 @@ +from abc import ABC, abstractmethod +from distilabel.steps import Step, StepInput, StepOutput +from ..text_generation.base import BaseDistilabelTask + + +class BaseFilter(Step, ABC): + def __init__(self, task: BaseDistilabelTask, **kwargs): + super().__init__(**kwargs) + self._task = task + + @property + def inputs(self) -> "StepColumns": + return self._task.outputs + + @property + def outputs(self) -> "StepColumns": + return self._task.outputs + + @abstractmethod + def process(self, *inputs: StepInput) -> "StepOutput": + pass \ No newline at end of file diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py new file mode 100644 index 00000000..ac96052f --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py @@ -0,0 +1,23 @@ +from typing import Any +from distilabel.steps import StepInput + +from .base import BaseFilter + + +DONT_KNOW_PHRASES: list[str] = [ + "I don't know", + "I do not know", + "don't know", + ] + + +class DontKnowFilter(BaseFilter): + + + def process(self, *inputs: StepInput) -> "StepOutput": + result = [{input_type: inp[input_type] for input_type in inp} for inp in inputs[0] if not self._is_dont_know(inp)] + yield result + + @staticmethod + def _is_dont_know(inp: dict[str, Any]) -> bool: + return any(s.lower() in inp["basic_answer"].lower() for s in DONT_KNOW_PHRASES) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py deleted file mode 100644 index 598b0c7a..00000000 --- 
a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/passages_gen_task.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Any - -from distilabel.steps.tasks import TextGeneration - -from ragbits.core.prompt.base import ChatFormat -from ragbits.evaluate.dataset_generator.utils import get_closest_substring, get_passages_list -from ragbits.evaluate.dataset_generator.prompt_passages_gen import PassagesGenInput, PassagesGenPrompt - -class PassagesGenTask(TextGeneration): - """ - A task for generating passages related to a specific question and answer from a text chunk. - """ - - get_matches: bool = False - - @property - def inputs(self) -> list[str]: - """Defines the input fields required for this task.""" - return ["chunk", "question", "basic_answer"] - - def format_input(self, input: dict[str, Any]) -> ChatFormat: - """ - Formats the input data for generating passages based on the provided "chunk", "question", and - "basic_answer" values. - - Args: - input: A dictionary containing "chunk", "question", and "basic_answer". - - Returns: - The formatted chat object containing the inputs for passage generation. - """ - chat = PassagesGenPrompt( - PassagesGenInput( - question=input["question"], - answer=input["basic_answer"], - chunk=input["chunk"] - ) - ).chat - return chat - - @property - def outputs(self) -> list[str]: - """Defines the output fields generated by this task.""" - return ["question", "chunk", "passages"] - - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, list[str]]: - """ - Formats the model's output into a structured dictionary with "question", "chunk", and "passages". - If `get_matches` is `True`, attempts to find the closest matches for each passage within the - provided chunk. - - Args: - output: The raw output generated by the text generation model. - input: Required if `get_matches` is `True`, containing "chunk" - and "question". - - Returns: - A dictionary with "chunk", "question", and a list of "passages". 
- """ - passages = get_passages_list(output) or [] - - if self.get_matches: - matched_passages = [] - - for passage in passages: - if passage in input["chunk"]: - matched_passages.append(passage) - else: - matched_passage = get_closest_substring(input["chunk"], passage) - matched_passages.append(matched_passage) - - return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} - - return {"chunk": input["chunk"], "question": input["question"], "passages": passages} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/__init__.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py similarity index 59% rename from packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py rename to packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index d2e6d9e7..ffccbd69 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/query_gen_task.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -1,20 +1,24 @@ +from abc import ABC, abstractmethod from typing import Any - from distilabel.steps.tasks import TextGeneration -from distilabel.steps import Step, StepInput +from distilabel.llms.base import LLM +from ragbits.core.prompt import Prompt, ChatFormat + -from ragbits.core.prompt.base import ChatFormat -from ragbits.evaluate.dataset_generator.prompt_query_gen import QueryGenPrompt, QueryGenInput +class BaseDistilabelTask(TextGeneration, ABC): + def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: type[Prompt]): + super().__init__(llm=llm) + self._inputs = inputs + self._outputs = outputs + self._prompt_class = prompt_class -class QueryGenTask(TextGeneration): - """ - A task for generating a question based on a provided text chunk. - """ + @property + def inputs(self): + return self._inputs @property - def inputs(self) -> list[str]: - """Defines the input fields required for this task.""" - return ["chunk"] + def outputs(self) -> list[str]: + return self._outputs def format_input(self, input: dict[str, Any]) -> ChatFormat: """ @@ -26,14 +30,10 @@ def format_input(self, input: dict[str, Any]) -> ChatFormat: Returns: The formatted chat object containing the input for query generation. """ - chat = QueryGenPrompt(QueryGenInput(chunk=input["chunk"])).chat + chat = self._prompt_class(self._prompt_class.input_type(**input)).chat return chat - @property - def outputs(self) -> list[str]: - """Defines the output fields generated by this task.""" - return ["question", "chunk"] - + @abstractmethod def format_output( self, output: str, input: dict[str, Any] | None = None ) -> dict[str, str]: @@ -47,4 +47,4 @@ def format_output( Returns: A dictionary containing "chunk" and "question". 
""" - return {"chunk": input["chunk"], "question": output} + pass diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py new file mode 100644 index 00000000..f42bc8f3 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -0,0 +1,104 @@ +from typing import Any +from distilabel.llms.base import LLM + +from ragbits.core.prompt import Prompt +from ...prompts.qa import BasicAnswerGenPrompt, PassagesGenPrompt, QueryGenPrompt +from ...utils import get_closest_substring, get_passages_list + + + +from .base import BaseDistilabelTask + + +class QueryGenTask(BaseDistilabelTask): + """ + A task for generating a question based on a provided text chunk. + """ + + def __init__(self, llm: LLM, prompt_class: type[Prompt] = QueryGenPrompt): + super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class) + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, str]: + """ + Formats the generated question into a structured dictionary with the original "chunk" input. + + Args: + output: The generated question. + input: Optional; contains "chunk" key with the original input chunk. + + Returns: + A dictionary containing "chunk" and "question". + """ + return {"chunk": input["chunk"], "question": output} + + +class PassagesGenTask(BaseDistilabelTask): + """ + A task for generating passages related to a specific question and answer from a text chunk. + """ + + get_matches: bool = False + + def __init__(self, llm: LLM, prompt_class: type[Prompt] = PassagesGenPrompt): + super().__init__(llm=llm, inputs=["chunk", "question", "basic_answer"], + outputs=["question", "chunk", "passages"], prompt_class=prompt_class) + + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, list[str]]: + """ + Formats the model's output into a structured dictionary with "question", "chunk", and "passages". + If `get_matches` is `True`, attempts to find the closest matches for each passage within the + provided chunk. + + Args: + output: The raw output generated by the text generation model. + input: Required if `get_matches` is `True`, containing "chunk" + and "question". + + Returns: + A dictionary with "chunk", "question", and a list of "passages". + """ + passages = get_passages_list(output) or [] + + if self.get_matches: + matched_passages = [] + + for passage in passages: + if passage in input["chunk"]: + matched_passages.append(passage) + else: + matched_passage = get_closest_substring(input["chunk"], passage) + matched_passages.append(matched_passage) + + return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} + + return {"chunk": input["chunk"], "question": input["question"], "passages": passages} + + +class AnswerGenTask(BaseDistilabelTask): + """ + A task for generating basic answers to questions based on a provided text chunk. This class extends + the `TextGeneration` task from the `distilabel` package. 
+ """ + + def __init__(self, llm: LLM, prompt_class: type[Prompt] = BasicAnswerGenPrompt): + super().__init__(llm=llm, inputs=["chunk", "question"], outputs=["basic_answer"], prompt_class=prompt_class) + + def format_output( + self, output: str, input: dict[str, Any] | None = None + ) -> dict[str, str]: + """ + Formats the model's output into a structured dictionary with the "basic_answer" key. + + Args: + output: The raw output generated by the text generation model. + input: Optional; not typically used in this formatting. + + Returns: + A dictionary with "basic_answer" as the key and the generated output as its value. + """ + return {"basic_answer": output} \ No newline at end of file From 82a62c91f9f24105e0e2ba1324c0f75b86b4dc95 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Thu, 7 Nov 2024 18:49:13 +0100 Subject: [PATCH 03/26] configurable generation pipeline --- .../dataset-generator/config/generate.yaml | 5 ++ .../dataset-generator/config/pipeline/qa.yaml | 15 ++++ .../evaluation/dataset-generator/generate.py | 72 ++++--------------- .../evaluate/dataset_generator/pipeline.py | 39 ++++++++++ .../evaluate/dataset_generator/prompts/qa.py | 3 - .../steps/dontknot_filter_step.py | 16 ++--- .../dataset_generator/tasks/filter/base.py | 2 +- .../tasks/filter/dont_know.py | 14 ++-- .../tasks/text_generation/base.py | 4 +- .../tasks/text_generation/qa.py | 26 +++---- .../evaluate/dataset_generator/utils.py | 8 ++- .../src/ragbits/evaluate/utils.py | 11 ++- 12 files changed, 110 insertions(+), 105 deletions(-) create mode 100644 examples/evaluation/dataset-generator/config/generate.yaml create mode 100644 examples/evaluation/dataset-generator/config/pipeline/qa.yaml create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml new file mode 100644 index 00000000..6fe0fa5c --- /dev/null +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -0,0 +1,5 @@ +defaults: + - pipeline: qa + - _self_ + +input_name: chunk diff --git a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml new file mode 100644 index 00000000..b88edab9 --- /dev/null +++ b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml @@ -0,0 +1,15 @@ +name: synthetic-RAG-data +tasks: + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + name: gpt-4o + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + name: gpt-4o + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + name: gpt-4o + filter: ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py index f784b3d5..c5b719bc 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -1,63 +1,21 @@ -from distilabel.llms import OpenAILLM -from distilabel.pipeline import Pipeline -import asyncio -from ragbits.evaluate.dataset_generator.tasks.text_generation.qa import AnswerGenTask, PassagesGenTask, QueryGenTask -from ragbits.evaluate.dataset_generator.tasks.filter.dont_know import DontKnowFilter 
+import hydra +from omegaconf import DictConfig +from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline +from ragbits.evaluate.utils import log_dataset_to_file -from datasets import Dataset +@hydra.main(config_path="config", config_name="generate", version_base="3.2") +def main(config: DictConfig): + FACTOIDS = [ + "Neural networks are algorithms capable of recognition and processing the data structure", + "Warsaw is capital of Poland", + "Ambafatima", + ] -from pathlib import Path + generation_pipeline = DatasetGenerationPipeline(config=config) + result_dataset = generation_pipeline(corpus=FACTOIDS) + log_dataset_to_file(dataset=result_dataset) -CORPUS_PATH = Path("osha3192.pdf") -DATASET_HF_PATH = "osho" - - -async def main(): - # router = DocumentProcessorRouter( - # providers={DocumentType.PDF: UnstructuredPdfProvider(chunking_kwargs={"max_characters": 512})} - # ) - # document_meta = DocumentMeta.from_local_path(local_path=CORPUS_PATH) - # document_processor = router.get_provider(document_meta) - # elements = await document_processor.process(document_meta) - FACTOIDS = ["Neural networks are algorithms capable of recognition and processing the data structure", "Warsaw is capital of Poland", "Ambafatima"] - dataset = Dataset.from_dict({ - "chunk": FACTOIDS - }) - - with Pipeline("synthetic-RAG-data") as pipeline: - query_gen_task = QueryGenTask( - llm=OpenAILLM(model="gpt-4o"), - ) - - answer_gen_task = AnswerGenTask( - llm=OpenAILLM(model="gpt-4o") - ) - - - passages_gen_task = PassagesGenTask( - llm=OpenAILLM(model="gpt-4o"), - ) - - dont_know_filter = DontKnowFilter(task=answer_gen_task) - - - query_gen_task >> answer_gen_task >> passages_gen_task >> dont_know_filter - - distiset = pipeline.run( - use_cache=False, - dataset=dataset - ) - result = distiset["default"]["train"] - result = result.remove_columns(["distilabel_metadata", "model_name"]) - - breakpoint() - result.push_to_hub( - DATASET_HF_PATH, - private=False, - ) if __name__ == "__main__": - asyncio.run(main()) - - + main() diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py new file mode 100644 index 00000000..6d9fe694 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -0,0 +1,39 @@ +import sys +from distilabel.pipeline import Pipeline +from distilabel.steps.base import Step +from omegaconf import DictConfig +from datasets import Dataset +from ragbits.core.utils.config_handling import get_cls_from_config + +module = sys.modules[__name__] + + +class DatasetGenerationPipeline: + def __init__(self, config: DictConfig): + self.config = config + + def __call__(self, corpus: list[str]) -> Dataset: + dataset = Dataset.from_dict({self.config.input_name: corpus}) + with Pipeline(self.config.pipeline.name) as pipeline: + tasks = self._parse_pipeline_steps() + prev_task = None + for task in tasks: + if prev_task: + prev_task >> task + prev_task = task + distiset = pipeline.run(use_cache=False, dataset=dataset) + result = distiset["default"]["train"] + result = result.remove_columns(["distilabel_metadata", "model_name"]) + return result + + def _parse_pipeline_steps(self) -> list[Step]: + tasks = [] + for task_config in self.config.pipeline.tasks: + llm_config = task_config.llm + llm = get_cls_from_config(llm_config.provider_type, module)(model=llm_config.name) + task = get_cls_from_config(task_config.type, module)(llm=llm) + tasks.append(task) 
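+            # A task entry may declare an optional "filter" in the config; when present, the
+            # filter step is built around the task and appended so it runs directly after it.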
+ if getattr(task_config, "filter", None): + filter = get_cls_from_config(task_config.filter, module)(task) + tasks.append(filter) + return tasks diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py index 23bed1b5..e20ef66e 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py @@ -8,7 +8,6 @@ class BasicAnswerGenInput(BaseModel): class BasicAnswerGenPrompt(Prompt[BasicAnswerGenInput, str]): - system_prompt = """You are an AI assistant to answer the given question in the provide evidence text. Do not mention any of these in the answer: "in the given text", "in the provided information", etc. Users do not know the passage source of the answer, so it should not be mentioned in the answer. @@ -25,7 +24,6 @@ class PassagesGenInput(BaseModel): class PassagesGenPrompt(Prompt[PassagesGenInput, str]): - system_prompt = """You are an AI tasked with retrieving passages (one or many) from the provided Chunk that that contain information needed to generate the provided Answer to the given Question. Instructions: @@ -43,7 +41,6 @@ class QueryGenInput(BaseModel): class QueryGenPrompt(Prompt[QueryGenInput, str]): - system_prompt = """You're an AI tasked to convert Text into a factoid question. Factoid questions are those seeking brief, factual information that can be easily verified. They typically require a yes or no answer or a brief explanation and often inquire about specific details such as dates, names, places, or events. diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py index 8723cf99..19101fe9 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py @@ -4,20 +4,18 @@ from distilabel.steps.base import GeneratorStep + def dontknow_filter_rule_based(row: dict[str, Any], dont_know_phrases: list[str]) -> bool: + return not any(phrase in s for phrase in dont_know_phrases for s in row["basic_answer"]) - return not any( - phrase in s for phrase in dont_know_phrases for s in row["basic_answer"] - ) class DontKnowFilter(GeneratorStep): - data: list[dict[str, Any]] = Field(default_factory=list, exclude=True) dont_know_phrases: list[str] = [ - "I don't know", - "I do not know", - "don't know", - ] + "I don't know", + "I do not know", + "don't know", + ] @staticmethod def _transform_data(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: @@ -27,4 +25,4 @@ def _transform_data(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: @property def outputs(self) -> List[str]: # TODO - pass \ No newline at end of file + pass diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py index 060d1ebf..d595af73 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py @@ -18,4 +18,4 @@ def outputs(self) -> "StepColumns": @abstractmethod def process(self, *inputs: StepInput) -> "StepOutput": - pass \ No newline at end of file 
+ pass diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py index ac96052f..dca4d9bc 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py @@ -5,17 +5,17 @@ DONT_KNOW_PHRASES: list[str] = [ - "I don't know", - "I do not know", - "don't know", - ] + "I don't know", + "I do not know", + "don't know", +] class DontKnowFilter(BaseFilter): - - def process(self, *inputs: StepInput) -> "StepOutput": - result = [{input_type: inp[input_type] for input_type in inp} for inp in inputs[0] if not self._is_dont_know(inp)] + result = [ + {input_type: inp[input_type] for input_type in inp} for inp in inputs[0] if not self._is_dont_know(inp) + ] yield result @staticmethod diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index ffccbd69..b1d3e1ec 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -34,9 +34,7 @@ def format_input(self, input: dict[str, Any]) -> ChatFormat: return chat @abstractmethod - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: """ Formats the generated question into a structured dictionary with the original "chunk" input. diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index f42bc8f3..6bc1345d 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -6,7 +6,6 @@ from ...utils import get_closest_substring, get_passages_list - from .base import BaseDistilabelTask @@ -18,9 +17,7 @@ class QueryGenTask(BaseDistilabelTask): def __init__(self, llm: LLM, prompt_class: type[Prompt] = QueryGenPrompt): super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class) - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: """ Formats the generated question into a structured dictionary with the original "chunk" input. 
@@ -42,13 +39,14 @@ class PassagesGenTask(BaseDistilabelTask): get_matches: bool = False def __init__(self, llm: LLM, prompt_class: type[Prompt] = PassagesGenPrompt): - super().__init__(llm=llm, inputs=["chunk", "question", "basic_answer"], - outputs=["question", "chunk", "passages"], prompt_class=prompt_class) - - - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, list[str]]: + super().__init__( + llm=llm, + inputs=["chunk", "question", "basic_answer"], + outputs=["question", "chunk", "passages"], + prompt_class=prompt_class, + ) + + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, list[str]]: """ Formats the model's output into a structured dictionary with "question", "chunk", and "passages". If `get_matches` is `True`, attempts to find the closest matches for each passage within the @@ -88,9 +86,7 @@ class AnswerGenTask(BaseDistilabelTask): def __init__(self, llm: LLM, prompt_class: type[Prompt] = BasicAnswerGenPrompt): super().__init__(llm=llm, inputs=["chunk", "question"], outputs=["basic_answer"], prompt_class=prompt_class) - def format_output( - self, output: str, input: dict[str, Any] | None = None - ) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: """ Formats the model's output into a structured dictionary with the "basic_answer" key. @@ -101,4 +97,4 @@ def format_output( Returns: A dictionary with "basic_answer" as the key and the generated output as its value. """ - return {"basic_answer": output} \ No newline at end of file + return {"basic_answer": output} diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py index de4605a1..88c1b217 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py @@ -2,12 +2,14 @@ from difflib import SequenceMatcher from itertools import combinations + def get_closest_substring(long, short): a, b = max( - combinations(re.finditer('|'.join(short.split()), long), 2), - key=lambda c: SequenceMatcher(None, long[c[0].start():c[1].end()], short).ratio() + combinations(re.finditer("|".join(short.split()), long), 2), + key=lambda c: SequenceMatcher(None, long[c[0].start() : c[1].end()], short).ratio(), ) - return long[a.start():b.end()] + return long[a.start() : b.end()] + def get_passages_list(raw_passages: str) -> list[str]: passages = raw_passages.split("[")[1] diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py index 2b801424..05acbc49 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Any +from datasets import Dataset from hydra.core.hydra_config import HydraConfig from neptune import Run from neptune.utils import stringify_unsupported @@ -28,7 +29,7 @@ def _save(file_path: Path, **data: Any) -> None: # noqa: ANN401 json.dump(data, file, indent=4) -def log_to_file(results: dict[str, Any], output_dir: Path | None = None) -> Path: +def log_dataset_to_file(dataset: Dataset, output_dir: Path | None = None) -> Path: """ Log the evaluation results locally. @@ -40,12 +41,8 @@ def log_to_file(results: dict[str, Any], output_dir: Path | None = None) -> Path The output directory. 
""" output_dir = output_dir or Path(HydraConfig.get().runtime.output_dir) - metrics_file = output_dir / "metrics.json" - results_file = output_dir / "results.json" - - _save(metrics_file, metrics=results["metrics"], time_perf=results["time_perf"]) - _save(results_file, results=results["results"]) - + dataset_file = output_dir / "dataset.hf" + dataset.save_to_disk(dataset_path=str(dataset_file)) return output_dir From 103734561ae7f3b5b5964c29ace7f32f428b20bd Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Thu, 7 Nov 2024 20:18:57 +0100 Subject: [PATCH 04/26] add corpus generation --- .../dataset-generator/config/generate.yaml | 2 +- .../dataset-generator/config/pipeline/qa.yaml | 5 +++ .../evaluation/dataset-generator/generate.py | 10 +++--- .../evaluate/dataset_generator/pipeline.py | 10 ++++-- .../prompts/corpus_generation.py | 12 +++++++ .../tasks/corpus_generation.py | 36 +++++++++++++++++++ .../tasks/text_generation/base.py | 2 +- .../tasks/text_generation/qa.py | 8 ++--- 8 files changed, 72 insertions(+), 13 deletions(-) create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py create mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml index 6fe0fa5c..75440836 100644 --- a/examples/evaluation/dataset-generator/config/generate.yaml +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -2,4 +2,4 @@ defaults: - pipeline: qa - _self_ -input_name: chunk +input_name: query diff --git a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml index b88edab9..58cd2e0b 100644 --- a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml +++ b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml @@ -1,5 +1,10 @@ name: synthetic-RAG-data tasks: + - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep + llm: + provider_type: ragbits.core.llms.litellm:LiteLLM + name: gpt-4o + num_per_query: 5 - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask llm: provider_type: distilabel.llms:OpenAILLM diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py index c5b719bc..9646a91c 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -6,14 +6,14 @@ @hydra.main(config_path="config", config_name="generate", version_base="3.2") def main(config: DictConfig): - FACTOIDS = [ - "Neural networks are algorithms capable of recognition and processing the data structure", - "Warsaw is capital of Poland", - "Ambafatima", + TOPICS = [ + "conspiracy theories", + "machine learning" ] generation_pipeline = DatasetGenerationPipeline(config=config) - result_dataset = generation_pipeline(corpus=FACTOIDS) + result_dataset = generation_pipeline(corpus=TOPICS) + breakpoint() log_dataset_to_file(dataset=result_dataset) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index 6d9fe694..739f7e39 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -30,8 +30,14 @@ def 
_parse_pipeline_steps(self) -> list[Step]: tasks = [] for task_config in self.config.pipeline.tasks: llm_config = task_config.llm - llm = get_cls_from_config(llm_config.provider_type, module)(model=llm_config.name) - task = get_cls_from_config(task_config.type, module)(llm=llm) + provider_type = llm_config.provider_type + if provider_type.startswith("distilabel"): + llm_kwargs = {"model": llm_config.name} + elif provider_type.startswith("ragbits"): + llm_kwargs = {"model_name": llm_config.name} + llm = get_cls_from_config(llm_config.provider_type, module)(**llm_kwargs) + task_kwargs = {"llm": llm, "num_per_query": getattr(task_config, "num_per_query", 1)} + task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) if getattr(task_config, "filter", None): filter = get_cls_from_config(task_config.filter, module)(task) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py new file mode 100644 index 00000000..43b4fd18 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py @@ -0,0 +1,12 @@ +from pydantic import BaseModel +from ragbits.core.prompt import Prompt + + +class BasicCorpusGenerationPromptInput(BaseModel): + query: str + + +class BasicCorpusGenerationPrompt(Prompt[BasicCorpusGenerationPromptInput]): + system_prompt: str = "You are a provider of random factoids on topic requested by a user. Use very few tokens and sentence equivalents" + user_prompt: str = "Provide factoids about {{ query }}" + diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py new file mode 100644 index 00000000..53a73eb9 --- /dev/null +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -0,0 +1,36 @@ +import asyncio +from distilabel.steps import StepInput +from distilabel.steps.base import Step + + +from ragbits.core.prompt import Prompt +from ragbits.core.llms.base import LLM +from copy import deepcopy +from ..prompts.corpus_generation import BasicCorpusGenerationPromptInput, BasicCorpusGenerationPrompt + + +class CorpusGenerationStep(Step): + def __init__(self, llm: LLM, num_per_query: int, prompt_class: type[Prompt] = BasicCorpusGenerationPrompt, **kwargs): + super().__init__(**kwargs) + self._llm = llm + self._prompt_class = prompt_class + self._num_per_query = num_per_query + + @property + def inputs(self) -> "StepColumns": + return ["query"] + + @property + def outputs(self) -> "StepColumns": + return ["chunk"] + + def process(self, *inputs: StepInput) -> "StepOutput": + result = [] + for inp in inputs[0]: + for _ in range(self._num_per_query): + new_inp = deepcopy(inp) + prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) + new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete(self._llm.generate(prompt=self._prompt_class(prompt_inp))) + result.append(new_inp) + yield result + diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index b1d3e1ec..c82dc677 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ 
b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -6,7 +6,7 @@ class BaseDistilabelTask(TextGeneration, ABC): - def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: type[Prompt]): + def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: type[Prompt], **kwargs): super().__init__(llm=llm) self._inputs = inputs self._outputs = outputs diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index 6bc1345d..5d52ab06 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -14,8 +14,8 @@ class QueryGenTask(BaseDistilabelTask): A task for generating a question based on a provided text chunk. """ - def __init__(self, llm: LLM, prompt_class: type[Prompt] = QueryGenPrompt): - super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class) + def __init__(self, llm: LLM, prompt_class: type[Prompt] = QueryGenPrompt, **kwargs): + super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class, **kwargs) def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: """ @@ -38,7 +38,7 @@ class PassagesGenTask(BaseDistilabelTask): get_matches: bool = False - def __init__(self, llm: LLM, prompt_class: type[Prompt] = PassagesGenPrompt): + def __init__(self, llm: LLM, prompt_class: type[Prompt] = PassagesGenPrompt, **kwargs): super().__init__( llm=llm, inputs=["chunk", "question", "basic_answer"], @@ -83,7 +83,7 @@ class AnswerGenTask(BaseDistilabelTask): the `TextGeneration` task from the `distilabel` package. 
""" - def __init__(self, llm: LLM, prompt_class: type[Prompt] = BasicAnswerGenPrompt): + def __init__(self, llm: LLM, prompt_class: type[Prompt] = BasicAnswerGenPrompt, **kwargs): super().__init__(llm=llm, inputs=["chunk", "question"], outputs=["basic_answer"], prompt_class=prompt_class) def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: From e2957b386f4334ef745880aec1d4d1df9b077df4 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 12:28:01 +0100 Subject: [PATCH 05/26] fix ruff --- .../dataset-generator/config/pipeline/qa.yaml | 25 ++++-- .../evaluation/dataset-generator/generate.py | 6 +- .../evaluate/dataset_generator/pipeline.py | 32 ++++--- .../prompts/corpus_generation.py | 11 ++- .../evaluate/dataset_generator/prompts/qa.py | 90 ++++++++++++------- .../steps/dontknot_filter_step.py | 28 ------ .../tasks/corpus_generation.py | 51 ++++++++--- .../dataset_generator/tasks/filter/base.py | 30 ++++++- .../tasks/filter/dont_know.py | 13 ++- .../tasks/text_generation/base.py | 28 ++++-- .../tasks/text_generation/qa.py | 17 ++-- .../evaluate/dataset_generator/utils.py | 35 +++++++- .../src/ragbits/evaluate/utils.py | 2 +- 13 files changed, 245 insertions(+), 123 deletions(-) delete mode 100644 packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py diff --git a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml index 58cd2e0b..b46ebb8a 100644 --- a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml +++ b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml @@ -3,18 +3,31 @@ tasks: - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep llm: provider_type: ragbits.core.llms.litellm:LiteLLM - name: gpt-4o - num_per_query: 5 + kwargs: + model_name: gpt-4o + kwargs: + num_per_query: 5 + prompt_class: ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask llm: provider_type: distilabel.llms:OpenAILLM - name: gpt-4o + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:QueryGenPrompt - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask llm: provider_type: distilabel.llms:OpenAILLM - name: gpt-4o + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:BasicAnswerGenPrompt - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask llm: provider_type: distilabel.llms:OpenAILLM - name: gpt-4o - filter: ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:PassagesGenPrompt + filters: + - ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py index 9646a91c..cdb983df 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -1,15 +1,13 @@ import hydra from omegaconf import DictConfig + from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline from ragbits.evaluate.utils import log_dataset_to_file @hydra.main(config_path="config", config_name="generate", version_base="3.2") def main(config: DictConfig): 
- TOPICS = [ - "conspiracy theories", - "machine learning" - ] + TOPICS = ["conspiracy theories", "machine learning"] generation_pipeline = DatasetGenerationPipeline(config=config) result_dataset = generation_pipeline(corpus=TOPICS) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index 739f7e39..fd1682af 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -1,18 +1,29 @@ import sys + +from datasets import Dataset from distilabel.pipeline import Pipeline from distilabel.steps.base import Step -from omegaconf import DictConfig -from datasets import Dataset +from omegaconf import DictConfig, OmegaConf + from ragbits.core.utils.config_handling import get_cls_from_config module = sys.modules[__name__] class DatasetGenerationPipeline: + """A pipeline for dataset generation""" + def __init__(self, config: DictConfig): self.config = config def __call__(self, corpus: list[str]) -> Dataset: + """ + Generates a dataset from a corpus or list of topics + Args: + corpus: a corpus of information or list of topics + Returns: + dataset instance + """ dataset = Dataset.from_dict({self.config.input_name: corpus}) with Pipeline(self.config.pipeline.name) as pipeline: tasks = self._parse_pipeline_steps() @@ -30,16 +41,15 @@ def _parse_pipeline_steps(self) -> list[Step]: tasks = [] for task_config in self.config.pipeline.tasks: llm_config = task_config.llm - provider_type = llm_config.provider_type - if provider_type.startswith("distilabel"): - llm_kwargs = {"model": llm_config.name} - elif provider_type.startswith("ragbits"): - llm_kwargs = {"model_name": llm_config.name} + llm_kwargs = OmegaConf.to_container(llm_config.kwargs) llm = get_cls_from_config(llm_config.provider_type, module)(**llm_kwargs) - task_kwargs = {"llm": llm, "num_per_query": getattr(task_config, "num_per_query", 1)} + task_kwargs = {"llm": llm} + if getattr(task_config, "kwargs", None): + task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) - if getattr(task_config, "filter", None): - filter = get_cls_from_config(task_config.filter, module)(task) - tasks.append(filter) + if getattr(task_config, "filters", None): + for filter_type in task_config.filters: + filter = get_cls_from_config(filter_type, module)(tasks[-1]) + tasks.append(filter) return tasks diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py index 43b4fd18..7e7022ff 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py @@ -1,12 +1,19 @@ from pydantic import BaseModel + from ragbits.core.prompt import Prompt class BasicCorpusGenerationPromptInput(BaseModel): + """A definition of input for corpus generation task""" + query: str class BasicCorpusGenerationPrompt(Prompt[BasicCorpusGenerationPromptInput]): - system_prompt: str = "You are a provider of random factoids on topic requested by a user. 
Use very few tokens and sentence equivalents" - user_prompt: str = "Provide factoids about {{ query }}" + """A basic prompt for corpus generation""" + system_prompt: str = ( + "You are a provider of random factoids on topic requested by a user.", + "Do not write a long essays, but short random facts about a given topic" "Use as few tokens as possible", + ) + user_prompt: str = "Provide factoids about {{ query }}" diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py index e20ef66e..40e29078 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/qa.py @@ -1,63 +1,85 @@ from pydantic import BaseModel + from ragbits.core.prompt import Prompt class BasicAnswerGenInput(BaseModel): + """An input definition for basic answer generation task""" + chunk: str question: str class BasicAnswerGenPrompt(Prompt[BasicAnswerGenInput, str]): - system_prompt = """You are an AI assistant to answer the given question in the provide evidence text. - Do not mention any of these in the answer: "in the given text", "in the provided information", etc. -Users do not know the passage source of the answer, so it should not be mentioned in the answer. -You can find the evidence from the given text about question, and you have to write a proper answer to the given question. -If you don't know the answer just say: I don't know.""" + """A prompt clas for basic answers generation""" + + system_prompt: str = ( + "You are an AI assistant to answer the given question in the provided " + "evidence text. Do not mention any of these in the answer: 'in the " + "given text', 'in the provided information', etc. Users do not know " + "the passage source of the answer, so it should not be mentioned in " + "the answer. You can find the evidence from the given text about the " + "question, and you have to write a proper answer to the given question. " + "If you don't know the answer just say: I don't know." + ) - user_prompt = "Text:\n<|text_start|>\n {{ chunk }} \n<|text_end|>\n\nQuestion:\n {{ question }} \n\nAnswer:" + user_prompt: str = "Text:\n<|text_start|>\n {{ chunk }} \n<|text_end|>\n\nQuestion:\n " "{{ question }} \n\nAnswer:" class PassagesGenInput(BaseModel): + """An input definition to passage generation prompt""" + question: str basic_answer: str chunk: str class PassagesGenPrompt(Prompt[PassagesGenInput, str]): - system_prompt = """You are an AI tasked with retrieving passages (one or many) from the provided Chunk that that contain information needed to generate the provided Answer to the given Question. + """A prompt class for passages generation""" -Instructions: -1. Each Passage MUST be VERBATIM and EXACT, without any modifications -2. Please provide the response in the form of a Python list. It should begin with "[" and end with "]" -3. You MUST start your answer with "[" -4. The Chunk ALWAYS contains information needed to justify the Answer -5. Each passage must be as BRIEF as possible; DO NOT RETURN FULL SENTENCES""" + system_prompt: str = ( + "You are an AI tasked with retrieving passages (one or many) from the " + "provided Chunk that contain information needed to generate the " + "provided Answer to the given Question.\n\nInstructions:\n1. Each " + "Passage MUST be VERBATIM and EXACT, without any modifications\n2. " + "Please provide the response in the form of a Python list. 
It should " + "begin with '[' and end with ']'\n3. You MUST start your answer with " + "'['\n4. The Chunk ALWAYS contains information needed to justify the " + "Answer\n5. Each passage must be as BRIEF as possible; DO NOT RETURN " + "FULL SENTENCES" + ) - user_prompt = "Question:\n {{ question }} \nAnswer:\n {{ basic_answer }} \nChunk:\n {{ chunk }}\n\nPassages:" + user_prompt: str = ( + "Question:\n {{ question }} \nAnswer:\n {{ basic_answer }} \nChunk:\n " "{{ chunk }}\n\nPassages:" + ) class QueryGenInput(BaseModel): + """An input definition for query generation prompt""" + chunk: str class QueryGenPrompt(Prompt[QueryGenInput, str]): - system_prompt = """You're an AI tasked to convert Text into a factoid question. -Factoid questions are those seeking brief, factual information that can be easily verified. They typically require a yes or no answer or a brief explanation and often inquire about specific details such as dates, names, places, or events. - -Examples of factoid questions include: - -- What is the incoming shipment report? -- What angle should I set my ladder at? -- What documents do I need to be a proof of transaction? - -Instructions: -1. Questions MUST BE extracted from given Text -2. Questions MUST BE as SHORT a possible -3. Questions should be as detailed as possible from Text -4. Create questions that ask about factual information from the Text -5. Only return ONE question -6. Frame questions in a first-person, INFROMAL style, as if the employee is seeking advice or clarification while working -7. Do not mention any of these in the questions: "in the given text", "in the provided information", etc. -Users do not know the passage source of the question, so it should not be mentioned in the question.""" - - user_prompt = "Text: {{ chunk }}\n\nGenerated Question from the Text:\n" + """A prompt class for query generation""" + + system_prompt: str = ( + "You're an AI tasked to convert Text into a factoid question. Factoid " + "questions are those seeking brief, factual information that can be " + "easily verified. They typically require a yes or no answer or a brief " + "explanation and often inquire about specific details such as dates, " + "names, places, or events.\n\nExamples of factoid questions include:\n" + "- What is the incoming shipment report?\n- What angle should I set my " + "ladder at?\n- What documents do I need to be a proof of transaction?\n\n" + "Instructions:\n1. Questions MUST BE extracted from given Text\n2. " + "Questions MUST BE as SHORT as possible\n3. Questions should be as " + "detailed as possible from Text\n4. Create questions that ask about " + "factual information from the Text\n5. Only return ONE question\n6. " + "Frame questions in a first-person, INFORMAL style, as if the employee " + "is seeking advice or clarification while working\n7. Do not mention any " + "of these in the questions: 'in the given text', 'in the provided " + "information', etc. Users do not know the passage source of the question, " + "so it should not be mentioned in the question." 
+ ) + + user_prompt: str = "Text: {{ chunk }}\n\nGenerated Question from the Text:\n" diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py deleted file mode 100644 index 19101fe9..00000000 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/steps/dontknot_filter_step.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Any, Dict, List - -from pydantic import Field - -from distilabel.steps.base import GeneratorStep - - -def dontknow_filter_rule_based(row: dict[str, Any], dont_know_phrases: list[str]) -> bool: - return not any(phrase in s for phrase in dont_know_phrases for s in row["basic_answer"]) - - -class DontKnowFilter(GeneratorStep): - data: list[dict[str, Any]] = Field(default_factory=list, exclude=True) - dont_know_phrases: list[str] = [ - "I don't know", - "I do not know", - "don't know", - ] - - @staticmethod - def _transform_data(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - # TODO - pass - - @property - def outputs(self) -> List[str]: - # TODO - pass diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 53a73eb9..995b216d 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -1,36 +1,63 @@ import asyncio -from distilabel.steps import StepInput -from distilabel.steps.base import Step +import sys +from copy import deepcopy +from distilabel.steps import StepInput, StepOutput +from distilabel.steps.base import Step -from ragbits.core.prompt import Prompt from ragbits.core.llms.base import LLM -from copy import deepcopy -from ..prompts.corpus_generation import BasicCorpusGenerationPromptInput, BasicCorpusGenerationPrompt +from ragbits.core.utils.config_handling import get_cls_from_config + +module = sys.modules[__name__] class CorpusGenerationStep(Step): - def __init__(self, llm: LLM, num_per_query: int, prompt_class: type[Prompt] = BasicCorpusGenerationPrompt, **kwargs): - super().__init__(**kwargs) + """A step for corpus generation on given topics""" + + def __init__( + self, + llm: LLM, + num_per_query: int, + prompt_class: str, + ): + super().__init__() self._llm = llm - self._prompt_class = prompt_class + self._prompt_class = get_cls_from_config(prompt_class, module) self._num_per_query = num_per_query @property - def inputs(self) -> "StepColumns": + def inputs(self) -> list[str]: + """ + A property defining input fields for a task + Returns: + list of input fields + """ return ["query"] @property - def outputs(self) -> "StepColumns": + def outputs(self) -> list[str]: + """ + A property describing output fields for a step + Returns: + list of output fields + """ return ["chunk"] def process(self, *inputs: StepInput) -> "StepOutput": + """ + Generates the corpus data for a given topics + Args: + inputs: a topics on which the corpus data should be generated + Returns: + a generated corpus + """ result = [] for inp in inputs[0]: for _ in range(self._num_per_query): new_inp = deepcopy(inp) prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) - new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete(self._llm.generate(prompt=self._prompt_class(prompt_inp))) + 
new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete( + self._llm.generate(prompt=self._prompt_class(prompt_inp)) + ) result.append(new_inp) yield result - diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py index d595af73..51ed892a 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/base.py @@ -1,21 +1,43 @@ from abc import ABC, abstractmethod + from distilabel.steps import Step, StepInput, StepOutput + +from ..corpus_generation import CorpusGenerationStep from ..text_generation.base import BaseDistilabelTask class BaseFilter(Step, ABC): - def __init__(self, task: BaseDistilabelTask, **kwargs): - super().__init__(**kwargs) + """Base class for filtering the outputs of pipeline steps""" + + def __init__(self, task: BaseDistilabelTask | CorpusGenerationStep): + super().__init__() self._task = task @property - def inputs(self) -> "StepColumns": + def inputs(self) -> list[str]: + """ + Property describing input fields for a filter + Returns: + list of input fields for a filter + """ return self._task.outputs @property - def outputs(self) -> "StepColumns": + def outputs(self) -> list[str]: + """ + Property describing output fields for a filter + Returns: + list of output fields for a filter + """ return self._task.outputs @abstractmethod def process(self, *inputs: StepInput) -> "StepOutput": + """ + Abstract method for filter step processing + Args: + inputs - inputs to a filter + Returns: + filtered outputs + """ pass diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py index dca4d9bc..807d4077 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py @@ -1,8 +1,8 @@ from typing import Any -from distilabel.steps import StepInput -from .base import BaseFilter +from distilabel.steps import StepInput, StepOutput +from .base import BaseFilter DONT_KNOW_PHRASES: list[str] = [ "I don't know", @@ -12,7 +12,16 @@ class DontKnowFilter(BaseFilter): + """A class for basic rule-based filtering of don't know anwers""" + def process(self, *inputs: StepInput) -> "StepOutput": + """ + Runs the processing step + Args: + inputs - inputs to the step + Returns: + filtered outputs + """ result = [ {input_type: inp[input_type] for input_type in inp} for inp in inputs[0] if not self._is_dont_know(inp) ] diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index c82dc677..f605b20a 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -1,23 +1,41 @@ +import sys from abc import ABC, abstractmethod from typing import Any -from distilabel.steps.tasks import TextGeneration + from distilabel.llms.base import LLM -from ragbits.core.prompt import Prompt, ChatFormat +from distilabel.steps.tasks import TextGeneration + +from ragbits.core.prompt import ChatFormat 
+from ragbits.core.utils.config_handling import get_cls_from_config + +module = sys.modules[__name__] class BaseDistilabelTask(TextGeneration, ABC): - def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: type[Prompt], **kwargs): + """Base class for distilabel TextGeneration tasks""" + + def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: str): super().__init__(llm=llm) self._inputs = inputs self._outputs = outputs - self._prompt_class = prompt_class + self._prompt_class = get_cls_from_config(prompt_class, module) @property - def inputs(self): + def inputs(self) -> list[str]: + """ + Property describing input fields for a task + Returns: + list of input fields for a task + """ return self._inputs @property def outputs(self) -> list[str]: + """ + Property describing output fields of the task + Returns: + list of outputs for a task + """ return self._outputs def format_input(self, input: dict[str, Any]) -> ChatFormat: diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index 5d52ab06..a27f4f1c 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -1,11 +1,8 @@ from typing import Any + from distilabel.llms.base import LLM -from ragbits.core.prompt import Prompt -from ...prompts.qa import BasicAnswerGenPrompt, PassagesGenPrompt, QueryGenPrompt from ...utils import get_closest_substring, get_passages_list - - from .base import BaseDistilabelTask @@ -14,10 +11,10 @@ class QueryGenTask(BaseDistilabelTask): A task for generating a question based on a provided text chunk. """ - def __init__(self, llm: LLM, prompt_class: type[Prompt] = QueryGenPrompt, **kwargs): - super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class, **kwargs) + def __init__(self, llm: LLM, prompt_class: str): + super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class) - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: # noqa: PLR6301 """ Formats the generated question into a structured dictionary with the original "chunk" input. @@ -38,7 +35,7 @@ class PassagesGenTask(BaseDistilabelTask): get_matches: bool = False - def __init__(self, llm: LLM, prompt_class: type[Prompt] = PassagesGenPrompt, **kwargs): + def __init__(self, llm: LLM, prompt_class: str): super().__init__( llm=llm, inputs=["chunk", "question", "basic_answer"], @@ -83,10 +80,10 @@ class AnswerGenTask(BaseDistilabelTask): the `TextGeneration` task from the `distilabel` package. """ - def __init__(self, llm: LLM, prompt_class: type[Prompt] = BasicAnswerGenPrompt, **kwargs): + def __init__(self, llm: LLM, prompt_class: str): super().__init__(llm=llm, inputs=["chunk", "question"], outputs=["basic_answer"], prompt_class=prompt_class) - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: # noqa: PLR6301 """ Formats the model's output into a structured dictionary with the "basic_answer" key. 
diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py index 88c1b217..e29e31b9 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py @@ -1,9 +1,19 @@ +import ast import re +import warnings from difflib import SequenceMatcher from itertools import combinations -def get_closest_substring(long, short): +def get_closest_substring(long: str, short: str) -> str: + """ + Finds the substring of the long string that is closest to the short string + Args: + long: str - longer string + short: str - shorter string + Returns: + the closest matching substring of the long string + """ a, b = max( combinations(re.finditer("|".join(short.split()), long), 2), key=lambda c: SequenceMatcher(None, long[c[0].start() : c[1].end()], short).ratio(), @@ -12,6 +22,23 @@ def get_closest_substring(long, short): def get_passages_list(raw_passages: str) -> list[str]: - passages = raw_passages.split("[")[1] - passages = passages.split("]")[0] - return eval("[" + passages + "]") + """ + Parses the LLM output into a list of passages + Args: + raw_passages: string representing raw passages returned by the LLM + Returns: + list of parsed passages + """ + match = re.search(r"\[(.*?)\]", raw_passages, re.DOTALL) + + if match: + passages_content = match.group(1) + try: + # Use ast.literal_eval to safely convert the string to a list, assuming it's a valid list-like format + return ast.literal_eval("[" + passages_content + "]") + except (SyntaxError, ValueError): + warnings.warn("Unable to evaluate the passages content. Check the format.", category=UserWarning) + return [] + else: + warnings.warn(message="No brackets found in the input string.", category=UserWarning) + return [] diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py index 05acbc49..44e41432 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py @@ -34,7 +34,7 @@ def log_dataset_to_file(dataset: Dataset, output_dir: Path | None = None) -> Pat Log the evaluation results locally. Args: - results: The evaluation results. + dataset: Hugging Face dataset to be logged. output_dir: The output directory.
Returns: From 6c50b53415836995b665786bf474f9cb848724b2 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 12:52:40 +0100 Subject: [PATCH 06/26] linter fixing --- .../dataset_generator/prompts/corpus_generation.py | 6 ++++-- .../evaluate/dataset_generator/tasks/corpus_generation.py | 7 +++++-- .../dataset_generator/tasks/text_generation/base.py | 8 +++++--- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py index 7e7022ff..3c6aa281 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/prompts/corpus_generation.py @@ -13,7 +13,9 @@ class BasicCorpusGenerationPrompt(Prompt[BasicCorpusGenerationPromptInput]): """A basic prompt for corpus generation""" system_prompt: str = ( - "You are a provider of random factoids on topic requested by a user.", - "Do not write a long essays, but short random facts about a given topic" "Use as few tokens as possible", + "You are a provider of random factoids on topic requested by a user." + "Do not write a long essays, the response for given query should be a single sentence" + "For each query provide only a single fact about a given topic" + "Use as few tokens as possible" ) user_prompt: str = "Provide factoids about {{ query }}" diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 995b216d..87002f0a 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -6,6 +6,7 @@ from distilabel.steps.base import Step from ragbits.core.llms.base import LLM +from ragbits.core.prompt import Prompt from ragbits.core.utils.config_handling import get_cls_from_config module = sys.modules[__name__] @@ -18,11 +19,13 @@ def __init__( self, llm: LLM, num_per_query: int, - prompt_class: str, + prompt_class: str | type[Prompt], ): super().__init__() self._llm = llm - self._prompt_class = get_cls_from_config(prompt_class, module) + self._prompt_class = ( + get_cls_from_config(prompt_class, module) if isinstance(prompt_class, str) else prompt_class + ) self._num_per_query = num_per_query @property diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index f605b20a..5872a888 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -5,7 +5,7 @@ from distilabel.llms.base import LLM from distilabel.steps.tasks import TextGeneration -from ragbits.core.prompt import ChatFormat +from ragbits.core.prompt import ChatFormat, Prompt from ragbits.core.utils.config_handling import get_cls_from_config module = sys.modules[__name__] @@ -14,11 +14,13 @@ class BaseDistilabelTask(TextGeneration, ABC): """Base class for distilabel TextGeneration tasks""" - def __init__(self, llm: LLM, inputs: list[str], outputs: list[str], prompt_class: str): + def __init__(self, llm: 
LLM, inputs: list[str], outputs: list[str], prompt_class: str | type[Prompt]): super().__init__(llm=llm) self._inputs = inputs self._outputs = outputs - self._prompt_class = get_cls_from_config(prompt_class, module) + self._prompt_class = ( + get_cls_from_config(prompt_class, module) if isinstance(prompt_class, str) else prompt_class + ) @property def inputs(self) -> list[str]: From a0b61dc991a80f67d307efc47e36c766e4e5da23 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 12:56:20 +0100 Subject: [PATCH 07/26] fixing linters --- .../src/ragbits/evaluate/utils.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py index 44e41432..456d3ebe 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py @@ -29,6 +29,27 @@ def _save(file_path: Path, **data: Any) -> None: # noqa: ANN401 json.dump(data, file, indent=4) +def log_to_file(results: dict[str, Any], output_dir: Path | None = None) -> Path: + """ + Log the evaluation results locally. + + Args: + results: The evaluation results. + output_dir: The output directory. + + Returns: + The output directory. + """ + output_dir = output_dir or Path(HydraConfig.get().runtime.output_dir) + metrics_file = output_dir / "metrics.json" + results_file = output_dir / "results.json" + + _save(metrics_file, metrics=results["metrics"], time_perf=results["time_perf"]) + _save(results_file, results=results["results"]) + + return output_dir + + def log_dataset_to_file(dataset: Dataset, output_dir: Path | None = None) -> Path: """ Log the evaluation results locally. From 07e51266004b910155b9e92e7204d86083df93a1 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 14:39:17 +0100 Subject: [PATCH 08/26] add howto --- docs/how-to/generate_dataset.md | 209 ++++++++++++++++++ .../evaluate/dataset_generator/pipeline.py | 5 +- .../tasks/corpus_generation.py | 2 +- .../tasks/text_generation/base.py | 4 +- .../tasks/text_generation/qa.py | 20 +- 5 files changed, 225 insertions(+), 15 deletions(-) create mode 100644 docs/how-to/generate_dataset.md diff --git a/docs/how-to/generate_dataset.md b/docs/how-to/generate_dataset.md new file mode 100644 index 00000000..e0924dc5 --- /dev/null +++ b/docs/how-to/generate_dataset.md @@ -0,0 +1,209 @@ +# Generating a Dataset with Ragbits + +Ragbits offers a convenient feature to generate artificial QA datasets for evaluating Retrieval-Augmented Generation (RAG) systems. You can choose between two different approaches: + +## Available Stacks + +1. **FromScratch**: + - This option allows you to create a complete QA dataset from scratch. + - **How it works**: You provide a list of topics, and the system automatically generates both the corpus and the QA dataset. + +2. **FromCorpus**: + - This approach uses an existing textual corpus. + - **How it works**: You supply a pre-existing corpus, such as documents you’ve previously retrieved, and the system creates the QA dataset based on it. + +## Usage Examples + +Below are examples demonstrating how to use both approaches. 
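Both examples call OpenAI-hosted models (`gpt-4o`) through `distilabel` and LiteLLM, so they assume an OpenAI API key is available in the environment before the pipeline runs. A minimal setup sketch (the key value is a placeholder, not a real credential):

```python
import os

# Placeholder only - in practice export the key from your shell or a secret
# manager; both the distilabel OpenAILLM and the LiteLLM back end are expected
# to pick up OPENAI_API_KEY from the environment.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")
```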
+ + +### From Scratch + + +```python +import json + +from datasets import Dataset +from omegaconf import OmegaConf +from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline + + +def print_dataset(dataset: Dataset): + entries = [] + for idx, (question, answer, passage) in enumerate( + zip(dataset["question"], dataset["basic_answer"], dataset["passages"]) + ): + entries.append( + f"{idx}. QUESTION: {question} ANSWER: {answer} PASSAGES: {json.dumps(passage)}" + ) + print("\r\n".join(entries)) + +pipeline_config = OmegaConf.create( + { + "input_name": "query", + "pipeline": { + "name": "synthetic-RAG-data", + "tasks": [ + { + "type": "ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep", + "llm": { + "provider_type": "ragbits.core.llms.litellm:LiteLLM", + "kwargs": {"model_name": "gpt-4o"}, + }, + "kwargs": { + "num_per_query": 5, + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt", + }, + }, + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:QueryGenPrompt" + }, + }, + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:BasicAnswerGenPrompt" + }, + }, + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:PassagesGenPrompt" + }, + "filters": [ + "ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter" + ], + }, + ], + }, + } +) + + +topics = ["conspiracy theories", "retrival augmented generation"] +pipeline = DatasetGenerationPipeline(pipeline_config) +dataset = pipeline(topics) +print_dataset(dataset) +``` + +After the succesful execution your console should display output with the followig structure: + +```text +0. QUESTION: Is there a theory that suggests the Earth is flat? ANSWER: Yes, the "Flat Earth" theory suggests that the Earth is a flat disc rather than a sphere. PASSAGES: ["The 'Flat Earth' theory suggests that the Earth is a flat disc rather than a sphere."] +1. QUESTION: Was the 1969 moon landing really staged by NASA? ANSWER: No, the 1969 moon landing was not staged by NASA. It was a real event where astronauts from the Apollo 11 mission landed on the moon. The conspiracy theory claiming it was staged is false. PASSAGES: ["The moon landing conspiracy theory falsely claims the 1969 moon landing was staged by NASA."] +2. QUESTION: Is the Earth really flat instead of round? ANSWER: No, the Earth is not flat. Scientific evidence overwhelmingly supports that Earth is an oblate spheroid, which means it is mostly spherical but slightly flattened at the poles and bulging at the equator. PASSAGES: ["scientific evidence overwhelmingly supports that Earth is an oblate spheroid, which means it is mostly spherical but slightly flattened at the poles and bulging at the equator"] +3. QUESTION: Who claims the moon landing was staged in 1969? ANSWER: The moon landing conspiracy theory claims it was staged by NASA in 1969. 
PASSAGES: ["The moon landing conspiracy theory claims it was staged by NASA in 1969."] +4. QUESTION: How does retrieval augmented generation improve accuracy? ANSWER: Retrieval augmented generation improves accuracy by combining pretrained language models with a retrieval component, allowing the model to access and incorporate relevant information from external data sources during the generation process. PASSAGES: ["Retrieval augmented generation (RAG) combines pretrained language models with a retrieval component to enhance accuracy."] +5. QUESTION: How does retrieval-augmented generation improve response accuracy and relevancy? ANSWER: Retrieval-augmented generation improves response accuracy and relevancy by combining retrieved information with language models. This approach allows the model to incorporate relevant data from external sources, which enhances its ability to generate more accurate and contextually appropriate responses. PASSAGES: ["Retrieval-augmented generation combines retrieved information with language models to improve response accuracy and relevancy."] +6. QUESTION: How does retrieval-augmented generation work to improve response accuracy? ANSWER: Retrieval-augmented generation improves response accuracy by combining information retrieval with text generation. This approach involves retrieving relevant information from a database or other sources and using that information to generate more accurate and informed responses. PASSAGES: ["Retrieval-augmented generation combines information retrieval with text generation to enhance response accuracy."] +7. QUESTION: How does retrieval augmented generation work? ANSWER: Retrieval augmented generation works by combining language models with an external information retrieval system. This approach allows the model to access and incorporate relevant data from an external source, enhancing the generation of responses or content with up-to-date or specific information it might not have inherently. PASSAGES: ["Retrieval augmented generation combines language models with external information retrieval."] +8. QUESTION: How does retrieval-augmented generation improve AI responses? ANSWER: Retrieval-augmented generation improves AI responses by combining the retrieval of relevant documents with text generation, providing enhanced context for the responses. PASSAGES: ["retrieval of relevant documents", "text generation for improved context"] +``` + +Please note that the results may differ between runs due to the nondeterministic nature of the LLM.
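The pipeline returns a regular Hugging Face `Dataset`, so it can be persisted for later evaluation runs, for example with the `log_dataset_to_file` helper from `ragbits.evaluate.utils`. A minimal sketch continuing from the example above (the output directory is an arbitrary choice; when omitted, the helper falls back to the Hydra run directory):

```python
from pathlib import Path

from ragbits.evaluate.utils import log_dataset_to_file

# Saves the generated dataset with Dataset.save_to_disk under
# <output_dir>/dataset.hf and returns the directory it wrote to.
output_dir = log_dataset_to_file(dataset=dataset, output_dir=Path("./generated-dataset"))
print(f"Dataset saved under {output_dir}")
```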
+ + +### From Corpus + +The code would be very similar as previously - the only differences are: + +* removal of first task from the tasks list in pipeline config +* change of input name from `query` to `chunk` + + +```python +import json + +from datasets import Dataset +from omegaconf import OmegaConf +from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline + +pipeline_config = OmegaConf.create( + { + "input_name": "chunk", + "pipeline": { + "name": "synthetic-RAG-data", + "tasks": [ + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:QueryGenPrompt" + }, + }, + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:BasicAnswerGenPrompt" + }, + }, + { + "type": "ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask", + "llm": { + "provider_type": "distilabel.llms:OpenAILLM", + "kwargs": {"model": "gpt-4o"}, + }, + "kwargs": { + "prompt_class": "ragbits.evaluate.dataset_generator.prompts.qa:PassagesGenPrompt" + }, + "filters": [ + "ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter" + ], + }, + ], + }, + } +) + + +def print_dataset(dataset: Dataset): + entries = [] + for idx, (question, answer, passage) in enumerate( + zip(dataset["question"], dataset["basic_answer"], dataset["passages"]) + ): + entries.append( + f"{idx}. QUESTION: {question} ANSWER: {answer} PASSAGES: {json.dumps(passage)}" + ) + print("\r\n".join(entries)) + + +topics = [ + "Neural networks are algorithms capable of data structure recognition", + "Large Language Models (LLM) are trained to predict the term given the context", + "Logistic regression is a simpliest form of neural network with no hidden neurons and output activated with sigmoid function", +] +pipeline = DatasetGenerationPipeline(pipeline_config) +dataset = pipeline(topics) +print_dataset(dataset) +``` + +After succesful execution you should see the following output minus the considerations mentioned in [From Scratch](#from-scratch) section: + +```text +0. QUESTION: What are neural networks capable of? ANSWER: Neural networks are capable of data structure recognition. PASSAGES: ["Neural networks are algorithms capable of data structure recognition"] +1. QUESTION: What does LLM stand for? ANSWER: LLM stands for Large Language Models. PASSAGES: ["Large Language Models (LLM)"] +2. QUESTION: What's the simplest form of a neural network? ANSWER: Logistic regression is the simplest form of a neural network, with no hidden neurons and an output activated with a sigmoid function. 
PASSAGES: ["Logistic regression is a simpliest form of neural network with no hidden neurons and output activated with sigmoid function"] +``` + + diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index fd1682af..139f0c67 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -1,4 +1,5 @@ import sys +from typing import Any from datasets import Dataset from distilabel.pipeline import Pipeline @@ -43,9 +44,9 @@ def _parse_pipeline_steps(self) -> list[Step]: llm_config = task_config.llm llm_kwargs = OmegaConf.to_container(llm_config.kwargs) llm = get_cls_from_config(llm_config.provider_type, module)(**llm_kwargs) - task_kwargs = {"llm": llm} + task_kwargs: dict[Any, Any] = {"llm": llm} if getattr(task_config, "kwargs", None): - task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) + task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) #type: ignore task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) if getattr(task_config, "filters", None): diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 87002f0a..ccded8db 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -58,7 +58,7 @@ def process(self, *inputs: StepInput) -> "StepOutput": for inp in inputs[0]: for _ in range(self._num_per_query): new_inp = deepcopy(inp) - prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) + prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) #type: ignore new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete( self._llm.generate(prompt=self._prompt_class(prompt_inp)) ) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index 5872a888..8fd46d6b 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -50,11 +50,11 @@ def format_input(self, input: dict[str, Any]) -> ChatFormat: Returns: The formatted chat object containing the input for query generation. """ - chat = self._prompt_class(self._prompt_class.input_type(**input)).chat + chat = self._prompt_class(self._prompt_class.input_type(**input)).chat #type: ignore return chat @abstractmethod - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str | list[str]]: """ Formats the generated question into a structured dictionary with the original "chunk" input. 
diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index a27f4f1c..271acdd1 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -14,7 +14,7 @@ class QueryGenTask(BaseDistilabelTask): def __init__(self, llm: LLM, prompt_class: str): super().__init__(llm=llm, inputs=["chunk"], outputs=["question", "chunk"], prompt_class=prompt_class) - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: # noqa: PLR6301 + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str | list[str]]: # noqa: PLR6301 """ Formats the generated question into a structured dictionary with the original "chunk" input. @@ -25,7 +25,7 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic Returns: A dictionary containing "chunk" and "question". """ - return {"chunk": input["chunk"], "question": output} + return {"chunk": input["chunk"], "question": output} #type: ignore class PassagesGenTask(BaseDistilabelTask): @@ -43,7 +43,7 @@ def __init__(self, llm: LLM, prompt_class: str): prompt_class=prompt_class, ) - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, list[str]]: + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str | list[str]]: """ Formats the model's output into a structured dictionary with "question", "chunk", and "passages". If `get_matches` is `True`, attempts to find the closest matches for each passage within the @@ -57,21 +57,21 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic Returns: A dictionary with "chunk", "question", and a list of "passages". """ - passages = get_passages_list(output) or [] + passages: list[str] = get_passages_list(output) or [] if self.get_matches: - matched_passages = [] + matched_passages: list[str] = [] for passage in passages: - if passage in input["chunk"]: + if passage in input["chunk"]: #type: ignore matched_passages.append(passage) else: - matched_passage = get_closest_substring(input["chunk"], passage) + matched_passage = get_closest_substring(input["chunk"], passage) #type: ignore matched_passages.append(matched_passage) - return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} + return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} #type: ignore - return {"chunk": input["chunk"], "question": input["question"], "passages": passages} + return {"chunk": input["chunk"], "question": input["question"], "passages": passages} #type: ignore class AnswerGenTask(BaseDistilabelTask): @@ -83,7 +83,7 @@ class AnswerGenTask(BaseDistilabelTask): def __init__(self, llm: LLM, prompt_class: str): super().__init__(llm=llm, inputs=["chunk", "question"], outputs=["basic_answer"], prompt_class=prompt_class) - def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str]: # noqa: PLR6301 + def format_output(self, output: str, input: dict[str, Any] | None = None) -> dict[str, str | list[str]]: # noqa: PLR6301 """ Formats the model's output into a structured dictionary with the "basic_answer" key. 
From 359866bce43948da392fcf19b19ea0081e267796 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 14:39:34 +0100 Subject: [PATCH 09/26] fix ruff format --- .../src/ragbits/evaluate/dataset_generator/pipeline.py | 2 +- .../dataset_generator/tasks/corpus_generation.py | 2 +- .../dataset_generator/tasks/text_generation/base.py | 2 +- .../dataset_generator/tasks/text_generation/qa.py | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index 139f0c67..f48e8d46 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -46,7 +46,7 @@ def _parse_pipeline_steps(self) -> list[Step]: llm = get_cls_from_config(llm_config.provider_type, module)(**llm_kwargs) task_kwargs: dict[Any, Any] = {"llm": llm} if getattr(task_config, "kwargs", None): - task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) #type: ignore + task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) # type: ignore task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) if getattr(task_config, "filters", None): diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index ccded8db..3be51a41 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -58,7 +58,7 @@ def process(self, *inputs: StepInput) -> "StepOutput": for inp in inputs[0]: for _ in range(self._num_per_query): new_inp = deepcopy(inp) - prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) #type: ignore + prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete( self._llm.generate(prompt=self._prompt_class(prompt_inp)) ) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py index 8fd46d6b..ba9a7154 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/base.py @@ -50,7 +50,7 @@ def format_input(self, input: dict[str, Any]) -> ChatFormat: Returns: The formatted chat object containing the input for query generation. 
""" - chat = self._prompt_class(self._prompt_class.input_type(**input)).chat #type: ignore + chat = self._prompt_class(self._prompt_class.input_type(**input)).chat # type: ignore return chat @abstractmethod diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index 271acdd1..900d772f 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -25,7 +25,7 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic Returns: A dictionary containing "chunk" and "question". """ - return {"chunk": input["chunk"], "question": output} #type: ignore + return {"chunk": input["chunk"], "question": output} # type: ignore class PassagesGenTask(BaseDistilabelTask): @@ -63,15 +63,15 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic matched_passages: list[str] = [] for passage in passages: - if passage in input["chunk"]: #type: ignore + if passage in input["chunk"]: # type: ignore matched_passages.append(passage) else: - matched_passage = get_closest_substring(input["chunk"], passage) #type: ignore + matched_passage = get_closest_substring(input["chunk"], passage) # type: ignore matched_passages.append(matched_passage) - return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} #type: ignore + return {"chunk": input["chunk"], "question": input["question"], "passages": matched_passages} # type: ignore - return {"chunk": input["chunk"], "question": input["question"], "passages": passages} #type: ignore + return {"chunk": input["chunk"], "question": input["question"], "passages": passages} # type: ignore class AnswerGenTask(BaseDistilabelTask): From 7805560f1c2c6c679da96743d660ed72485e9f31 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 12 Nov 2024 14:41:40 +0100 Subject: [PATCH 10/26] ruff formating --- examples/evaluation/dataset-generator/generate.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py index cdb983df..f0733e9b 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -6,7 +6,14 @@ @hydra.main(config_path="config", config_name="generate", version_base="3.2") -def main(config: DictConfig): +def main(config: DictConfig) -> None: + """ + A main function for dataset generation example + Args: + config - configuration + Returns: + None + """ TOPICS = ["conspiracy theories", "machine learning"] generation_pipeline = DatasetGenerationPipeline(config=config) From 65a5020a59e606ff26fdc4f7bc86812b715ff09e Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 14:21:25 +0100 Subject: [PATCH 11/26] Update packages/ragbits-evaluate/src/ragbits/evaluate/utils.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Paweł Chmielak <46346212+pawel-chmielak-deepsense@users.noreply.github.com> --- packages/ragbits-evaluate/src/ragbits/evaluate/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py index 456d3ebe..f2ea8fb8 100644 --- 
a/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/utils.py @@ -55,7 +55,7 @@ def log_dataset_to_file(dataset: Dataset, output_dir: Path | None = None) -> Pat Log the evaluation results locally. Args: - dataset: Huggingface dataet to be logged. + dataset: Huggingface dataset to be logged. output_dir: The output directory. Returns: From c7617bbbe98c8658e44107917b43ef3ea513bcfc Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 16:35:31 +0100 Subject: [PATCH 12/26] Update packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Paweł Chmielak <46346212+pawel-chmielak-deepsense@users.noreply.github.com> --- .../evaluate/dataset_generator/tasks/text_generation/qa.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index 900d772f..66742c4a 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -51,8 +51,7 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic Args: output: The raw output generated by the text generation model. - input: Required if `get_matches` is `True`, containing "chunk" - and "question". + input: Required if `get_matches` is `True`, containing "chunk" and "question". Returns: A dictionary with "chunk", "question", and a list of "passages". 
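Note on the patches above: the passages prompt asks the model to reply with a Python-style list ("[...]"), and `get_passages_list` in `dataset_generator/utils.py` recovers that list from the raw completion by matching the brackets and parsing the content (see the utils.py changes later in this series). Below is a simplified, self-contained sketch of that parsing step, assuming JSON-compatible model output; the function name is illustrative and this is not the repository's exact code.

```python
import json
import re
import warnings


def parse_passages_sketch(raw_output: str) -> list[str]:
    """Extract a bracketed list of passages from a raw model completion (illustrative only)."""
    match = re.search(r"\[(.*)\]", raw_output, re.DOTALL)
    if not match:
        warnings.warn("No bracketed list found in the model output.", category=UserWarning)
        return []
    try:
        passages = json.loads("[" + match.group(1) + "]")
    except json.JSONDecodeError:
        warnings.warn("Passages content could not be parsed. Check the format.", category=UserWarning)
        return []
    return [str(passage) for passage in passages]
```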
From b58097f80ab409a389a0362e416727554565be9b Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 16:35:46 +0100 Subject: [PATCH 13/26] Update packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Paweł Chmielak <46346212+pawel-chmielak-deepsense@users.noreply.github.com> --- .../evaluate/dataset_generator/tasks/corpus_generation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 3be51a41..a59a7b6b 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -55,7 +55,7 @@ def process(self, *inputs: StepInput) -> "StepOutput": a generated corpus """ result = [] - for inp in inputs[0]: + for topic in inputs[0]: for _ in range(self._num_per_query): new_inp = deepcopy(inp) prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore From 40af5031979cf55b7d513f335f0a183c6a1f3e21 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 16:37:00 +0100 Subject: [PATCH 14/26] naming --- .../evaluate/dataset_generator/tasks/corpus_generation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index a59a7b6b..96a4f132 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -57,7 +57,7 @@ def process(self, *inputs: StepInput) -> "StepOutput": result = [] for topic in inputs[0]: for _ in range(self._num_per_query): - new_inp = deepcopy(inp) + new_inp = deepcopy(topic) prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete( self._llm.generate(prompt=self._prompt_class(prompt_inp)) From 7c1d52b287788505682f9adf60cf1207515ca560 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 19:36:49 +0100 Subject: [PATCH 15/26] pydantic configuration of data generation pipeline --- docs/how-to/generate_dataset.md | 4 +- .../dataset-generator/config/generate.yaml | 37 +++++- .../dataset-generator/config/pipeline/qa.yaml | 33 ----- .../evaluation/dataset-generator/generate.py | 4 +- .../evaluate/dataset_generator/pipeline.py | 113 +++++++++++++++--- 5 files changed, 135 insertions(+), 56 deletions(-) delete mode 100644 examples/evaluation/dataset-generator/config/pipeline/qa.yaml diff --git a/docs/how-to/generate_dataset.md b/docs/how-to/generate_dataset.md index e0924dc5..9720f755 100644 --- a/docs/how-to/generate_dataset.md +++ b/docs/how-to/generate_dataset.md @@ -95,7 +95,7 @@ pipeline_config = OmegaConf.create( topics = ["conspiracy theories", "retrival augmented generation"] -pipeline = DatasetGenerationPipeline(pipeline_config) +pipeline = DatasetGenerationPipeline.from_dict_config(dict_config=pipeline_config) dataset = pipeline(topics) print_dataset(dataset) ``` @@ -193,7 +193,7 
@@ topics = [ "Large Language Models (LLM) are trained to predict the term given the context", "Logistic regression is a simpliest form of neural network with no hidden neurons and output activated with sigmoid function", ] -pipeline = DatasetGenerationPipeline(pipeline_config) +pipeline = DatasetGenerationPipeline.from_dict_config(dict_config=pipeline_config) dataset = pipeline(topics) print_dataset(dataset) ``` diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml index 75440836..68bbfa43 100644 --- a/examples/evaluation/dataset-generator/config/generate.yaml +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -1,5 +1,34 @@ -defaults: - - pipeline: qa - - _self_ - input_name: query +name: synthetic-RAG-data +tasks: + - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep + llm: + provider_type: ragbits.core.llms.litellm:LiteLLM + kwargs: + model_name: gpt-4o + kwargs: + num_per_query: 5 + prompt_class: ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:QueryGenPrompt + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:BasicAnswerGenPrompt + - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask + llm: + provider_type: distilabel.llms:OpenAILLM + kwargs: + model: gpt-4o + kwargs: + prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:PassagesGenPrompt + filters: + - ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter diff --git a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml b/examples/evaluation/dataset-generator/config/pipeline/qa.yaml deleted file mode 100644 index b46ebb8a..00000000 --- a/examples/evaluation/dataset-generator/config/pipeline/qa.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: synthetic-RAG-data -tasks: - - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep - llm: - provider_type: ragbits.core.llms.litellm:LiteLLM - kwargs: - model_name: gpt-4o - kwargs: - num_per_query: 5 - prompt_class: ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt - - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask - llm: - provider_type: distilabel.llms:OpenAILLM - kwargs: - model: gpt-4o - kwargs: - prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:QueryGenPrompt - - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:AnswerGenTask - llm: - provider_type: distilabel.llms:OpenAILLM - kwargs: - model: gpt-4o - kwargs: - prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:BasicAnswerGenPrompt - - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:PassagesGenTask - llm: - provider_type: distilabel.llms:OpenAILLM - kwargs: - model: gpt-4o - kwargs: - prompt_class: ragbits.evaluate.dataset_generator.prompts.qa:PassagesGenPrompt - filters: - - ragbits.evaluate.dataset_generator.tasks.filter.dont_know:DontKnowFilter diff --git a/examples/evaluation/dataset-generator/generate.py 
b/examples/evaluation/dataset-generator/generate.py index f0733e9b..85aa34b4 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -15,10 +15,8 @@ def main(config: DictConfig) -> None: None """ TOPICS = ["conspiracy theories", "machine learning"] - - generation_pipeline = DatasetGenerationPipeline(config=config) + generation_pipeline = DatasetGenerationPipeline.from_dict_config(dict_config=config) result_dataset = generation_pipeline(corpus=TOPICS) - breakpoint() log_dataset_to_file(dataset=result_dataset) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index f48e8d46..6cdc0325 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -5,17 +5,102 @@ from distilabel.pipeline import Pipeline from distilabel.steps.base import Step from omegaconf import DictConfig, OmegaConf +from pydantic import BaseModel from ragbits.core.utils.config_handling import get_cls_from_config module = sys.modules[__name__] +class LLMConfigForTask(BaseModel): + """ + Configuration for the LLM (Language Model) associated with a specific task. + + Attributes: + provider_type (str): The type of LLM provider. + kwargs (dict): Additional parameters or settings for the LLM provider. + """ + + provider_type: str + kwargs: dict + + +class TaskConfig(BaseModel): + """ + Configuration for an individual task in the dataset generation pipeline. + + Attributes: + type: str: type of the task + llm (LLMConfigForTask): The configuration for the LLM used in this task. + kwargs (dicts): Optional additional parameters or settings for the task. + filters (list[str] | None): Optional filters to apply during the task. Defaults to None. + """ + + type: str + llm: LLMConfigForTask + kwargs: dict | None = None + filters: list[str] | None = None + + +class DatasetGenerationPipelineConfig(BaseModel): + """ + Configuration for the entire dataset generation pipeline. + + Attributes: + name (str): The name of the dataset generation pipeline. + input_name (str): The name of the input resource or dataset. + tasks (list[TaskConfig]): A list of task configurations included in the pipeline. + """ + + name: str + input_name: str + tasks: list[TaskConfig] + + @classmethod + def from_dict_config(cls, dict_config: DictConfig) -> "DatasetGenerationPipelineConfig": + """ + Creates an instance of `DatasetGenerationPipelineConfig` from a dictionary-based configuration. + + Args: + dict_config (DictConfig): A configuration object containing pipeline details. + + Returns: + DatasetGenerationPipelineConfig: An instance populated with data from the given configuration. 
+ + """ + name = dict_config.name + input_name = dict_config.input_name + tasks = [ + TaskConfig( + type=task_config.type, + llm=LLMConfigForTask( + provider_type=task_config.llm.provider_type, + kwargs=OmegaConf.to_container(task_config.llm.kwargs), + ), + kwargs=OmegaConf.to_container(task_config.kwargs), + filters=getattr(task_config, "filters", None), + ) + for task_config in dict_config.tasks + ] + return cls(name=name, input_name=input_name, tasks=tasks) + + class DatasetGenerationPipeline: """A pipeline for dataset generation""" - def __init__(self, config: DictConfig): + def __init__(self, config: DatasetGenerationPipelineConfig): self.config = config + self._instantiate_pipeline() + + @classmethod + def from_dict_config(cls, dict_config: DictConfig) -> "DatasetGenerationPipeline": + """ + Instantiates the pipeline from dict config validated through pydantic base model + Returns: + DatasetGenerationPipeline + """ + config = DatasetGenerationPipelineConfig.from_dict_config(dict_config=dict_config) + return cls(config=config) def __call__(self, corpus: list[str]) -> Dataset: """ @@ -26,27 +111,18 @@ def __call__(self, corpus: list[str]) -> Dataset: dataset instance """ dataset = Dataset.from_dict({self.config.input_name: corpus}) - with Pipeline(self.config.pipeline.name) as pipeline: - tasks = self._parse_pipeline_steps() - prev_task = None - for task in tasks: - if prev_task: - prev_task >> task - prev_task = task - distiset = pipeline.run(use_cache=False, dataset=dataset) + distiset = self.pipeline.run(use_cache=False, dataset=dataset) result = distiset["default"]["train"] result = result.remove_columns(["distilabel_metadata", "model_name"]) return result def _parse_pipeline_steps(self) -> list[Step]: tasks = [] - for task_config in self.config.pipeline.tasks: + for task_config in self.config.tasks: llm_config = task_config.llm - llm_kwargs = OmegaConf.to_container(llm_config.kwargs) - llm = get_cls_from_config(llm_config.provider_type, module)(**llm_kwargs) + llm = get_cls_from_config(llm_config.provider_type, module)(**llm_config.kwargs) task_kwargs: dict[Any, Any] = {"llm": llm} - if getattr(task_config, "kwargs", None): - task_kwargs.update(OmegaConf.to_container(task_config.kwargs)) # type: ignore + task_kwargs.update(task_config.kwargs or {}) # type: ignore task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) if getattr(task_config, "filters", None): @@ -54,3 +130,12 @@ def _parse_pipeline_steps(self) -> list[Step]: filter = get_cls_from_config(filter_type, module)(tasks[-1]) tasks.append(filter) return tasks + + def _instantiate_pipeline(self) -> None: + with Pipeline(self.config.name) as self.pipeline: + tasks = self._parse_pipeline_steps() + prev_task = None + for task in tasks: + if prev_task: + prev_task >> task + prev_task = task From 6c34965e49138beb935e8b5a1dd1d261c1787f91 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 20:04:41 +0100 Subject: [PATCH 16/26] concurent execution --- .../dataset-generator/config/generate.yaml | 2 +- .../evaluate/dataset_generator/pipeline.py | 12 +++---- .../tasks/corpus_generation.py | 33 +++++++++++++------ 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml index 68bbfa43..7f6d6d40 100644 --- a/examples/evaluation/dataset-generator/config/generate.yaml +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -1,4 +1,4 @@ -input_name: query 
+input_name: topic name: synthetic-RAG-data tasks: - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py index 6cdc0325..bb95e151 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/pipeline.py @@ -75,9 +75,9 @@ def from_dict_config(cls, dict_config: DictConfig) -> "DatasetGenerationPipeline type=task_config.type, llm=LLMConfigForTask( provider_type=task_config.llm.provider_type, - kwargs=OmegaConf.to_container(task_config.llm.kwargs), + kwargs=OmegaConf.to_container(task_config.llm.kwargs), # type: ignore ), - kwargs=OmegaConf.to_container(task_config.kwargs), + kwargs=OmegaConf.to_container(task_config.kwargs), # type: ignore filters=getattr(task_config, "filters", None), ) for task_config in dict_config.tasks @@ -125,10 +125,10 @@ def _parse_pipeline_steps(self) -> list[Step]: task_kwargs.update(task_config.kwargs or {}) # type: ignore task = get_cls_from_config(task_config.type, module)(**task_kwargs) tasks.append(task) - if getattr(task_config, "filters", None): - for filter_type in task_config.filters: - filter = get_cls_from_config(filter_type, module)(tasks[-1]) - tasks.append(filter) + filter_types = getattr(task_config, "filters", None) or [] + for filter_type in filter_types: + filter = get_cls_from_config(filter_type, module)(tasks[-1]) + tasks.append(filter) return tasks def _instantiate_pipeline(self) -> None: diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 96a4f132..10542b7f 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -35,7 +35,7 @@ def inputs(self) -> list[str]: Returns: list of input fields """ - return ["query"] + return ["topic"] @property def outputs(self) -> list[str]: @@ -54,13 +54,26 @@ def process(self, *inputs: StepInput) -> "StepOutput": Returns: a generated corpus """ - result = [] - for topic in inputs[0]: - for _ in range(self._num_per_query): - new_inp = deepcopy(topic) - prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore - new_inp[self.outputs[0]] = asyncio.get_event_loop().run_until_complete( - self._llm.generate(prompt=self._prompt_class(prompt_inp)) - ) - result.append(new_inp) + result = asyncio.get_event_loop().run_until_complete(self._process_topics(topics=inputs[0])) yield result + + async def _process_topics(self, topics: list[dict]) -> list[dict]: + """ + Processes a list of topics concurrently, respecting the batch size limit. + + Args: + topics (List[dict]): A list of topics to process. + + Returns: + List[dict]: A list of processed topics. 
+ """ + tasks = [self._process_topic(topic) for _ in range(self._num_per_query) for topic in topics] + results = await asyncio.gather(*tasks) + return results + + async def _process_topic(self, topic: dict) -> dict: + new_inp = deepcopy(topic) + prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore + new_inp[self.outputs[0]] = await self._llm.generate(prompt=self._prompt_class(prompt_inp)) + return new_inp + From 12ed8702c90744eb0688ad36dd8dd77b85bc507d Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 20:19:45 +0100 Subject: [PATCH 17/26] conurent execution --- .../evaluation/dataset-generator/config/generate.yaml | 2 +- .../dataset_generator/tasks/corpus_generation.py | 11 +---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml index 7f6d6d40..68bbfa43 100644 --- a/examples/evaluation/dataset-generator/config/generate.yaml +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -1,4 +1,4 @@ -input_name: topic +input_name: query name: synthetic-RAG-data tasks: - type: ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 10542b7f..94e9c1f9 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -35,7 +35,7 @@ def inputs(self) -> list[str]: Returns: list of input fields """ - return ["topic"] + return ["query"] @property def outputs(self) -> list[str]: @@ -58,15 +58,6 @@ def process(self, *inputs: StepInput) -> "StepOutput": yield result async def _process_topics(self, topics: list[dict]) -> list[dict]: - """ - Processes a list of topics concurrently, respecting the batch size limit. - - Args: - topics (List[dict]): A list of topics to process. - - Returns: - List[dict]: A list of processed topics. 
- """ tasks = [self._process_topic(topic) for _ in range(self._num_per_query) for topic in topics] results = await asyncio.gather(*tasks) return results From e035bb4a5c4613d971998ed341afa94cc879d125 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Tue, 19 Nov 2024 20:20:07 +0100 Subject: [PATCH 18/26] fix ruff format --- .../evaluate/dataset_generator/tasks/corpus_generation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 94e9c1f9..034e1ee4 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -67,4 +67,3 @@ async def _process_topic(self, topic: dict) -> dict: prompt_inp = self._prompt_class.input_type(**{self.inputs[0]: new_inp[self.inputs[0]]}) # type: ignore new_inp[self.outputs[0]] = await self._llm.generate(prompt=self._prompt_class(prompt_inp)) return new_inp - From 1ff0a53d1a4bedf50a383d0c407a3191afe1eaf4 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 00:29:24 +0100 Subject: [PATCH 19/26] pr comments --- examples/evaluation/dataset-generator/config/generate.yaml | 2 +- packages/ragbits-evaluate/pyproject.toml | 2 +- .../evaluate/dataset_generator/tasks/corpus_generation.py | 6 +++--- .../evaluate/dataset_generator/tasks/text_generation/qa.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/evaluation/dataset-generator/config/generate.yaml b/examples/evaluation/dataset-generator/config/generate.yaml index 68bbfa43..b24b9413 100644 --- a/examples/evaluation/dataset-generator/config/generate.yaml +++ b/examples/evaluation/dataset-generator/config/generate.yaml @@ -7,7 +7,7 @@ tasks: kwargs: model_name: gpt-4o kwargs: - num_per_query: 5 + num_per_topic: 5 prompt_class: ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt - type: ragbits.evaluate.dataset_generator.tasks.text_generation.qa:QueryGenTask llm: diff --git a/packages/ragbits-evaluate/pyproject.toml b/packages/ragbits-evaluate/pyproject.toml index 3d26595e..92840c83 100644 --- a/packages/ragbits-evaluate/pyproject.toml +++ b/packages/ragbits-evaluate/pyproject.toml @@ -31,7 +31,7 @@ classifiers = [ "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Software Development :: Libraries :: Python Modules", ] -dependencies = ["hydra-core~=1.3.2", "neptune~=1.12.0", "ragbits-core==0.3.0", "optuna==4.0.0"] +dependencies = ["hydra-core~=1.3.2", "neptune~=1.12.0", "ragbits-core==0.3.0", "optuna==4.0.0", "distilabel==1.4.1"] [project.optional-dependencies] relari = [ diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py index 034e1ee4..9fe203d1 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/corpus_generation.py @@ -18,7 +18,7 @@ class CorpusGenerationStep(Step): def __init__( self, llm: LLM, - num_per_query: int, + num_per_topic: int, prompt_class: str | type[Prompt], ): super().__init__() @@ -26,7 +26,7 @@ def __init__( self._prompt_class = ( get_cls_from_config(prompt_class, module) if isinstance(prompt_class, str) 
else prompt_class ) - self._num_per_query = num_per_query + self._num_per_topic = num_per_topic @property def inputs(self) -> list[str]: @@ -58,7 +58,7 @@ def process(self, *inputs: StepInput) -> "StepOutput": yield result async def _process_topics(self, topics: list[dict]) -> list[dict]: - tasks = [self._process_topic(topic) for _ in range(self._num_per_query) for topic in topics] + tasks = [self._process_topic(topic) for _ in range(self._num_per_topic) for topic in topics] results = await asyncio.gather(*tasks) return results diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py index 66742c4a..219545d0 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/text_generation/qa.py @@ -33,7 +33,7 @@ class PassagesGenTask(BaseDistilabelTask): A task for generating passages related to a specific question and answer from a text chunk. """ - get_matches: bool = False + should_get_matches: bool = False def __init__(self, llm: LLM, prompt_class: str): super().__init__( @@ -58,7 +58,7 @@ def format_output(self, output: str, input: dict[str, Any] | None = None) -> dic """ passages: list[str] = get_passages_list(output) or [] - if self.get_matches: + if self.should_get_matches: matched_passages: list[str] = [] for passage in passages: From a4d21575b54bd762bfffe8c43b0856d74939fc1d Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 00:35:14 +0100 Subject: [PATCH 20/26] pr comments --- .../dataset_generator/tasks/filter/dont_know.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py index 807d4077..579cf4f6 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py @@ -16,17 +16,17 @@ class DontKnowFilter(BaseFilter): def process(self, *inputs: StepInput) -> "StepOutput": """ - Runs the processing step + Runs the basic rule-based filtering of the inputs Args: - inputs - inputs to the step + inputs - the outputs of some generation step Returns: - filtered outputs + outputs filtered to the ones that do not contain the pre-defined phrases """ result = [ - {input_type: inp[input_type] for input_type in inp} for inp in inputs[0] if not self._is_dont_know(inp) + {input_type: input_[input_type] for input_type in input_} for input_ in inputs[0] if not self._is_dont_know(input_) ] yield result @staticmethod - def _is_dont_know(inp: dict[str, Any]) -> bool: - return any(s.lower() in inp["basic_answer"].lower() for s in DONT_KNOW_PHRASES) + def _is_dont_know(input_: dict[str, Any]) -> bool: + return any(s.lower() in input_["basic_answer"].lower() for s in DONT_KNOW_PHRASES) From 2fbcde07f2ceaae6d0fc99d8ac07becd6285fe8f Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 00:37:42 +0100 Subject: [PATCH 21/26] pr comments --- .../src/ragbits/evaluate/dataset_generator/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py 
b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py index e29e31b9..01d3fb6c 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py @@ -1,4 +1,4 @@ -import ast +import json import re import warnings from difflib import SequenceMatcher @@ -35,7 +35,7 @@ def get_passages_list(raw_passages: str) -> list[str]: passages_content = match.group(1) try: # Use eval to convert the string to a list, assuming it's a valid list-like format - return ast.literal_eval("[" + passages_content + "]") + return json.loads("[" + passages_content + "]") except (SyntaxError, ValueError): warnings.warn("Unable to evaluate the passages content. Check the format.", category=UserWarning) return [] From d114932fe5a7a51c944e4e2c6e5188b31e5f0f75 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 00:45:46 +0100 Subject: [PATCH 22/26] fix ruff formater --- .../tasks/filter/dont_know.py | 4 +- uv.lock | 60 +++++++++++++++++-- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py index 579cf4f6..da473ac4 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/tasks/filter/dont_know.py @@ -23,7 +23,9 @@ def process(self, *inputs: StepInput) -> "StepOutput": outputs filtered to the ones that do not contain the pre-defined phrases """ result = [ - {input_type: input_[input_type] for input_type in input_} for input_ in inputs[0] if not self._is_dont_know(input_) + {input_type: input_[input_type] for input_type in input_} + for input_ in inputs[0] + if not self._is_dont_know(input_) ] yield result diff --git a/uv.lock b/uv.lock index c89e5221..0d7e6720 100644 --- a/uv.lock +++ b/uv.lock @@ -871,6 +871,31 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252 }, ] +[[package]] +name = "distilabel" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "datasets" }, + { name = "httpx" }, + { name = "jinja2" }, + { name = "multiprocess" }, + { name = "nest-asyncio" }, + { name = "networkx" }, + { name = "orjson" }, + { name = "portalocker" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "scipy" }, + { name = "tblib" }, + { name = "typer" }, + { name = "universal-pathlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/1b/331aeeda851a888e8bff84b8074cb1301909b06e509140a85a23dd1345cf/distilabel-1.4.1.tar.gz", hash = "sha256:0c373be234e8f2982ec7f940d9a95585b15306b6ab5315f5a6a45214d8f34006", size = 6420123 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/b3/62d07a936cd9c3039d811681c33b9fc898e48219cf22c9186954e2575365/distilabel-1.4.1-py3-none-any.whl", hash = "sha256:4643da7f3abae86a330d86d1498443ea56978e462e21ae3d106a4c6013386965", size = 442152 }, +] + [[package]] name = "distlib" version = "0.3.8" @@ -2686,7 +2711,7 @@ name = "nvidia-cudnn-cu12" version = "8.9.2.26" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "(platform_machine 
!= 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" }, + { name = "nvidia-cublas-cu12" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/ff/74/a2e2be7fb83aaedec84f391f082cf765dfb635e7caa9b49065f73e4835d8/nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9", size = 731725872 }, @@ -2713,9 +2738,9 @@ name = "nvidia-cusolver-cu12" version = "11.4.5.107" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" }, - { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" }, - { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" }, + { name = "nvidia-cublas-cu12" }, + { name = "nvidia-cusparse-cu12" }, + { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/bc/1d/8de1e5c67099015c834315e333911273a8c6aaba78923dd1d1e25fc5f217/nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd", size = 124161928 }, @@ -2726,7 +2751,7 @@ name = "nvidia-cusparse-cu12" version = "12.1.0.106" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" }, + { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/65/5b/cfaeebf25cd9fdec14338ccb16f6b2c4c7fa9163aefcf057d86b9cc248bb/nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c", size = 195958278 }, @@ -3918,6 +3943,7 @@ name = "ragbits-evaluate" version = "0.3.0" source = { editable = "packages/ragbits-evaluate" } dependencies = [ + { name = "distilabel" }, { name = "hydra-core" }, { name = "neptune" }, { name = "optuna" }, @@ -3941,6 +3967,7 @@ dev = [ [package.metadata] requires-dist = [ { name = "continuous-eval", marker = "extra == 'relari'", specifier = "~=0.3.12" }, + { name = "distilabel", specifier = "==1.4.1" }, { name = "hydra-core", specifier = "~=1.3.2" }, { name = "neptune", specifier = "~=1.12.0" }, { name = "optuna", specifier = "==4.0.0" }, @@ -4803,6 +4830,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252 }, ] +[[package]] +name = "tblib" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/df/4f2cd7eaa6d41a7994d46527349569d46e34d9cdd07590b5c5b0dcf53de3/tblib-3.0.0.tar.gz", hash = "sha256:93622790a0a29e04f0346458face1e144dc4d32f493714c6c3dff82a4adb77e6", size = 30616 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/87/ce70db7cae60e67851eb94e1a2127d4abb573d3866d2efd302ceb0d4d2a5/tblib-3.0.0-py3-none-any.whl", hash = 
"sha256:80a6c77e59b55e83911e1e607c649836a69c103963c5f28a46cbeef44acf8129", size = 12478 }, +] + [[package]] name = "tenacity" version = "8.5.0" @@ -5018,7 +5054,7 @@ name = "triton" version = "2.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "filelock", marker = "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')" }, + { name = "filelock", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/95/05/ed974ce87fe8c8843855daa2136b3409ee1c126707ab54a8b72815c08b49/triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5", size = 167900779 }, @@ -5090,6 +5126,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a6/ab/7e5f53c3b9d14972843a647d8d7a853969a58aecc7559cb3267302c94774/tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd", size = 346586 }, ] +[[package]] +name = "universal-pathlib" +version = "0.2.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fsspec" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/67/6c31ba464eafda05c677628dd7859ed4904597a78694d9cc81b593c6bad2/universal_pathlib-0.2.5.tar.gz", hash = "sha256:ea5d4fb8178c2ab469cf4fa46d0ceb16ccb378da46dbbc28a8b9c1eebdccc655", size = 174755 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/d9/289d308f889aac33639703a60906e3a0f3ec97419b7ca5bedaddc77648fd/universal_pathlib-0.2.5-py3-none-any.whl", hash = "sha256:a634f700eca827b4ad03bfa0267e51161560dd1de83b051cf0fccf39b3e56b32", size = 49892 }, +] + [[package]] name = "unstructured" version = "0.15.13" From f94403345b7c437f9d98e357bab5e791177ef130 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 10:42:06 +0100 Subject: [PATCH 23/26] add distilabel to whitelist --- .libraries-whitelist.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.libraries-whitelist.txt b/.libraries-whitelist.txt index eb7e2f65..79551157 100644 --- a/.libraries-whitelist.txt +++ b/.libraries-whitelist.txt @@ -2,4 +2,5 @@ pkg_resources tiktoken chardet chroma-hnswlib -rouge \ No newline at end of file +rouge +distilabel \ No newline at end of file From b95866ce0438c9b0af06267f70da5ea032bffdf1 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 12:19:25 +0100 Subject: [PATCH 24/26] add configuration schema description to example script --- examples/evaluation/dataset-generator/generate.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/evaluation/dataset-generator/generate.py b/examples/evaluation/dataset-generator/generate.py index 85aa34b4..9eae9e46 100644 --- a/examples/evaluation/dataset-generator/generate.py +++ b/examples/evaluation/dataset-generator/generate.py @@ -10,7 +10,8 @@ def main(config: DictConfig) -> None: """ A main function for dataset generation example Args: - config - configuration + config (DictConfig) - configuration should follow + ragbits.evaluate.dataset_generator.DatasetGenerationPipelineConfig data model Returns: None """ From fe8dea76e5954e87ea32ffa0ae62e6d0555a17dc Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 12:20:54 +0100 Subject: [PATCH 25/26] add description of data model to docs --- docs/how-to/generate_dataset.md | 5 
+++++ 1 file changed, 5 insertions(+) diff --git a/docs/how-to/generate_dataset.md b/docs/how-to/generate_dataset.md index 9720f755..0df1034b 100644 --- a/docs/how-to/generate_dataset.md +++ b/docs/how-to/generate_dataset.md @@ -38,6 +38,8 @@ def print_dataset(dataset: Dataset): ) print("\r\n".join(entries)) +# configuration should follow +# ragbits.evaluate.dataset_generator.DatasetGenerationPipelineConfig data model pipeline_config = OmegaConf.create( { "input_name": "query", @@ -132,6 +134,9 @@ from datasets import Dataset from omegaconf import OmegaConf from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline + +# configuration should follow +# ragbits.evaluate.dataset_generator.DatasetGenerationPipelineConfig data model pipeline_config = OmegaConf.create( { "input_name": "chunk", From a81144f33f413a5a8d97c4515cb3821a390f36f4 Mon Sep 17 00:00:00 2001 From: kdziedzic68 Date: Wed, 20 Nov 2024 12:33:23 +0100 Subject: [PATCH 26/26] remove redundant comment --- .../src/ragbits/evaluate/dataset_generator/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py index 01d3fb6c..19e19e2b 100644 --- a/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py +++ b/packages/ragbits-evaluate/src/ragbits/evaluate/dataset_generator/utils.py @@ -34,7 +34,6 @@ def get_passages_list(raw_passages: str) -> list[str]: if match: passages_content = match.group(1) try: - # Use eval to convert the string to a list, assuming it's a valid list-like format return json.loads("[" + passages_content + "]") except (SyntaxError, ValueError): warnings.warn("Unable to evaluate the passages content. Check the format.", category=UserWarning)
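Taken together, the series converges on a pipeline that is configured declaratively and driven by a list of topics. For reference, a minimal end-to-end usage sketch following the import paths and config shape shown in the patches above; the single corpus-generation task, topic list, and model name are placeholder choices, and `num_per_topic` is kept small for illustration.

```python
from omegaconf import OmegaConf

from ragbits.evaluate.dataset_generator.pipeline import DatasetGenerationPipeline
from ragbits.evaluate.utils import log_dataset_to_file

# The config shape follows DatasetGenerationPipelineConfig: name, input_name and a list of tasks.
config = OmegaConf.create(
    {
        "name": "synthetic-RAG-data",
        "input_name": "query",
        "tasks": [
            {
                "type": "ragbits.evaluate.dataset_generator.tasks.corpus_generation:CorpusGenerationStep",
                "llm": {
                    "provider_type": "ragbits.core.llms.litellm:LiteLLM",
                    "kwargs": {"model_name": "gpt-4o"},
                },
                "kwargs": {
                    "num_per_topic": 2,
                    "prompt_class": "ragbits.evaluate.dataset_generator.prompts.corpus_generation:BasicCorpusGenerationPrompt",
                },
            },
        ],
    }
)

pipeline = DatasetGenerationPipeline.from_dict_config(dict_config=config)
dataset = pipeline(corpus=["conspiracy theories", "machine learning"])
log_dataset_to_file(dataset=dataset)
```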