From e3050d36d44e422b3ff3c0226237d6751f56f3e5 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Sat, 20 Apr 2024 09:12:33 +0200 Subject: [PATCH 01/20] Support passing BasePromptTemplates as prompts into LLMTool --- motleycrew/tool/llm_tool.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/motleycrew/tool/llm_tool.py b/motleycrew/tool/llm_tool.py index 9d8991bd..7c71688d 100644 --- a/motleycrew/tool/llm_tool.py +++ b/motleycrew/tool/llm_tool.py @@ -2,6 +2,7 @@ from langchain_core.tools import Tool from langchain_core.prompts import PromptTemplate +from langchain_core.prompts.base import BasePromptTemplate from langchain_core.language_models import BaseLanguageModel from langchain_core.pydantic_v1 import BaseModel, Field @@ -15,7 +16,7 @@ def __init__( self, name: str, description: str, - prompt: str, + prompt: str | BasePromptTemplate, llm: Optional[BaseLanguageModel] = None, ): langchain_tool = create_llm_langchain_tool(name, description, prompt, llm) @@ -25,7 +26,7 @@ def __init__( def create_llm_langchain_tool( name: str, description: str, - prompt: str, + prompt: str | BasePromptTemplate, llm: Optional[BaseLanguageModel] = None, input_description: Optional[str] = "Input for the tool.", ): @@ -37,11 +38,13 @@ class LLMToolInput(BaseModel): input: str = Field(description=input_description) - p = PromptTemplate.from_template(prompt) - assert "input" in p.input_variables, "Prompt must contain an `input` variable" + if not isinstance(prompt, BasePromptTemplate): + prompt = PromptTemplate.from_template(prompt) + + assert "input" in prompt.input_variables, "Prompt must contain an `input` variable" def call_llm(input: str) -> str: - chain = p | llm + chain = prompt | llm return chain.invoke({"input": input}) return Tool.from_function( From d3bab13eed63e19cdd398394c1b2e7a30c40d90b Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Sat, 20 Apr 2024 09:54:41 +0200 Subject: [PATCH 02/20] Start of blog post draft example. --- examples/blog_post/blog_post.py | 134 ++++++++++++++++++++++++++ examples/blog_post/blog_post_input.py | 102 ++++++++++++++++++++ examples/image_generation_crewai.py | 4 +- motleycrew/tool/llm_tool.py | 16 +-- 4 files changed, 249 insertions(+), 7 deletions(-) create mode 100644 examples/blog_post/blog_post.py create mode 100644 examples/blog_post/blog_post_input.py diff --git a/examples/blog_post/blog_post.py b/examples/blog_post/blog_post.py new file mode 100644 index 00000000..2435d51e --- /dev/null +++ b/examples/blog_post/blog_post.py @@ -0,0 +1,134 @@ +from typing import Union, Sequence, List + +from dotenv import load_dotenv + + +from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage +from langchain_core.prompts.chat import ChatPromptTemplate +from motleycrew.agent.langchain.react import ReactMotleyAgent + +from motleycrew.tool.llm_tool import LLMTool +from motleycrew import MotleyCrew, Task + +from .blog_post_input import text + +load_dotenv() + +# TODO: switch example to using URL instead of fixed text? 
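+# One possible approach is the (untested) sketch commented out below; it assumes
+# UnstructuredURLLoader and TokenTextSplitter still resolve from these langchain
+# paths, and UnstructuredURLLoader additionally needs the `unstructured` package: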
+# from langchain.document_loaders import UnstructuredURLLoader +# from langchain.text_splitter import TokenTextSplitter +# def urls_to_messages(urls: Union[str, Sequence[str]]) -> List[HumanMessage]: +# if isinstance(urls, str): +# urls = [urls] +# loader = UnstructuredURLLoader(urls=urls) +# data = loader.load() +# text_splitter = TokenTextSplitter(chunk_size=1000, chunk_overlap=0) +# texts = text_splitter.split_documents(data) +# return [HumanMessage(content=d.page_content) for d in texts] + + +max_words = 500 +min_words = 450 + +editor_prompt = ChatPromptTemplate.from_messages( + [ + SystemMessage( + content="You are an experienced online blog post editor with 10 years of experience." + ), + HumanMessage( + content="""Review the blog post draft below (delimited by triple backticks) + and provide a critique and use specific examples from the text on what + should be done to improve the draft, with data professionals as the intended audience. + Also, suggest a catchy title for the story. + ```{input}``` + """ + ), + ] +) + +illustrator_prompt = ChatPromptTemplate.from_messages( + [ + SystemMessage( + content="You are a professional illustrator with 10 years of experience." + ), + HumanMessage( + content="You are given the following draft story, delimited by triple back quotes: ```{second_draft}```" + ), + HumanMessage( + content="""Your task is to specify the illustrations that would fit this story. + Make sure the illustrations are varied in style, eye-catching, and some of them humorous. + Describe each illustration in a way suitable for entering in a Midjourney prompt. + Each description should be detailed and verbose. Don't explain the purpose of the illustrations, just describe in great + detail what each illustration should show, in a way suitable for a generative image prompt. + There should be at most 5 and at least 3 illustrations. + Return the illustration descriptions as a list in the format + ["...", "...", ..., "..."] + """ + ), + ] +) + +seo_prompt = ChatPromptTemplate.from_messages( + [ + SystemMessage( + content="""Act as an SEO expert with 10 years of experience but ensure to + explain any SEO jargon for clarity when using it.""" + ), + HumanMessage( + content="""Review the blog post below (delimited by triple back quotes) and provide specific +examples from the text where to optimize its SEO content. +Recommend SEO-friendly titles and subtitles that could be used. +```{second_draft}``` +""" + ), + ] +) + +editor = LLMTool( + name="editor", + description="An editor providing constructive suggestions to improve the blog post submitted to it", + prompt=editor_prompt, +) + +# TODO: Turn it into an agent that calls the DALL-E tool +# and returns a dict {image_description: image_url} +illustrator = LLMTool( + name="illustrator", + description="An illustrator providing detailed descriptions of illustrations for a story", + prompt=illustrator_prompt, +) + +seo_expert = LLMTool( + name="seo_expert", + description="An SEO expert providing SEO optimization suggestions", + prompt=seo_prompt, +) + + +writer = ReactMotleyAgent( + prompt="You are a professional freelance copywriter with 10 years of experience." +) + +# Create tasks for your agents +crew = MotleyCrew() +task1 = Task( + crew=crew, + name="Write a blog post from the provided information", + description=f"""Write a blog post of at most {max_words} words and at least {min_words} + words based on the information provided. 
Keep the tone suitable for an audience of
+    data professionals, avoid superlatives and an overly excitable tone.
+    Don't discuss installation or testing.
+    The summary will be provided in one or multiple chunks, followed by .
+
+    Proceed as follows: first, write a draft blog post as described above.
+    Then, submit it in turn to the editor, illustrator, and SEO expert for feedback.
+    In the case of the illustrator, insert the illustration descriptions it provides in
+    square brackets into the appropriate places in the draft.
+    In each case, revise the draft as per the response of the expert and submit it to the next expert.
+
+    After you have implemented each expert's recommendations, finalize the draft.
+
+    Return the blog post in markdown format.
+    Information begins: {text} """,
+    agent=writer,
+)
diff --git a/examples/blog_post/blog_post_input.py b/examples/blog_post/blog_post_input.py
new file mode 100644
index 00000000..564977fb
--- /dev/null
+++ b/examples/blog_post/blog_post_input.py
@@ -0,0 +1,102 @@
+text = """
+Wise Pizza: A library for automatically finding the most unusual segments
+WisePizza is a library to find and visualise the most interesting slices in multidimensional data based on Lasso and LP solvers, which provides different functions to find segments whose average is most different from the global one or find segments most useful in explaining the difference between two datasets.
+
+The approach
+WisePizza assumes you have a dataset with a number of discrete dimensions (could be currency, region, etc). For each combination of dimensions, the dataset must have a total value (total of the metric over that segment, for example the total volume in that region and currency), and an optional size value (set to 1 if not specified); this could for example be the total number of customers for that region and currency. The average value of the outcome for the segment is defined as total divided by size; in this example it would be the average volume per customer.
+
+explain_levels takes such a dataset and looks for a small number of 'simple' segments (each only constraining a small number of dimensions) that between them explain most of the variation in the averages; you could also think of them as the segments whose size-weighted deviation from the overall dataset average is the largest. This trades off unusual averages (which will naturally occur more for smaller segments) against segment size.
+
+Yet another way of looking at it is that we look for segments which, if their average was reset to the overall dataset average, would move the overall total the most.
+
+explain_changes_in_totals and explain_changes_in_average take two datasets of the kind described above, with the same column names, and apply the same kind of logic to find the segments that contribute most to the difference (in total or average, respectively) between the two datasets, optionally splitting that into contributions from changes in segment size and changes in segment total.
+
+Sometimes, rather than explaining the change in totals from one period to the next, one wishes to explain a change in averages. The analytics of this are a little different - for example, while (as long as all weights and totals are positive) increasing a segment size (other things remaining equal) always increases the overall total, it can increase or decrease the overall average, depending on whether the average value of that segment is below or above the overall average.
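+(As a concrete illustration: with an overall average of 10, growing a segment whose average is 4 raises the overall total but pulls the overall average down toward 4.)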
+ +Table of Contents +What can this do for you? +Find interesting slices +Comparison between two datasets +Installation +Quick Start +For Developers +Tests +What can this do for you? +The automated search for interesting segments can give you the following: + +1. Better information about segments and subsegments in your data +By using WisePizza and defining initial segments, you can find a segment which maximizes a specific outcome, such as adoption rates. + +2. Understanding differences in two time periods or two dataframes +If you have two time periods or two datasets, you can find segments that experience the largest change in the totals from previous period/dataset. + +Installation +You can always get the newest wise_pizza release using pip: https://pypi.org/project/wise-pizza/ + +pip install wise-pizza +From the command line (another way): + +pip install git+https://github.com/transferwise/wise-pizza.git +From Jupyter notebook (another way): + +!pip install git+https://github.com/transferwise/wise-pizza.git +Or you can clone and run from source, in which case you should pip -r requirements.txt before running. + +Quick Start +The wisepizza package can be used for finding segments with unusual average: + +sf = explain_levels( + df=data, + dims=dims, + total_name=totals, + size_name=size, + max_depth=2, + min_segments=20, + solver="lasso" +) +plot + +Or for finding changes between two datasets in totals: + +sf1 = explain_changes_in_totals( + df1=pre_data, + df2=data, + dims=dims, + total_name=totals, + size_name=size, + max_depth=2, + min_segments=20, + how="totals", + solver="lasso" +) +plot + +Or for finding changes between two datasets in average: + +sf1 = explain_changes_in_average( + df1=pre_data, + df2=data, + dims=dims, + total_name=totals, + size_name=size, + max_depth=2, + min_segments=20, + how="totals", + solver="lasso" +) +plot + +And then you can visualize differences: + +sf.plot() +And check segments: + +sf.segments +Please see the full example here + +For Developers +Testing +We use PyTest for testing. If you want to contribute code, make sure that the tests in tests/ run without errors. + +Wise-pizza is open sourced and maintained by Wise Plc. Copyright 2023 Wise Plc. +""" diff --git a/examples/image_generation_crewai.py b/examples/image_generation_crewai.py index b584fdc9..32c0de22 100644 --- a/examples/image_generation_crewai.py +++ b/examples/image_generation_crewai.py @@ -1,3 +1,5 @@ +import os.path + from dotenv import load_dotenv from motleycrew import MotleyCrew, Task @@ -8,7 +10,7 @@ load_dotenv() configure_logging(verbose=True) -image_generator_tool = DallEImageGeneratorTool() +image_generator_tool = DallEImageGeneratorTool(os.path.realpath("./images")) # For saving images locally use the line below # image_generator_tool = DallEImageGeneratorTool(images_directory="images") diff --git a/motleycrew/tool/llm_tool.py b/motleycrew/tool/llm_tool.py index 7c71688d..9b35f2b4 100644 --- a/motleycrew/tool/llm_tool.py +++ b/motleycrew/tool/llm_tool.py @@ -33,19 +33,23 @@ def create_llm_langchain_tool( if llm is None: llm = init_llm(llm_framework=LLMFramework.LANGCHAIN) + if not isinstance(prompt, BasePromptTemplate): + prompt = PromptTemplate.from_template(prompt) + + assert ( + len(prompt.input_variables) == 1 + ), "Prompt must contain exactly one input variable" + input_var = prompt.input_variables[0] + class LLMToolInput(BaseModel): """Input for the tool.""" + # TODO: how hard is it to get that name from prompt.input_variables? 
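+        # One possible answer, sketched here as an assumption: pydantic v1's
+        # `create_model` (re-exported by langchain_core.pydantic_v1) can build
+        # the schema dynamically, e.g.
+        #   LLMToolInput = create_model(
+        #       "LLMToolInput",
+        #       **{input_var: (str, Field(description=input_description))},
+        #   )
+        # so the field name tracks the prompt's input variable instead of
+        # being hard-coded to `input`.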
input: str = Field(description=input_description) - if not isinstance(prompt, BasePromptTemplate): - prompt = PromptTemplate.from_template(prompt) - - assert "input" in prompt.input_variables, "Prompt must contain an `input` variable" - def call_llm(input: str) -> str: chain = prompt | llm - return chain.invoke({"input": input}) + return chain.invoke({input_var: input}) return Tool.from_function( func=call_llm, From 54596f501cd7c8b17a11002d425b47e9a5f43f92 Mon Sep 17 00:00:00 2001 From: whimo Date: Thu, 25 Apr 2024 14:13:01 +0400 Subject: [PATCH 03/20] Introduce Kuzu graph store --- motleycrew/storage/__init__.py | 1 + motleycrew/storage/kuzu_graph_store.py | 232 +++++++++++++++++++++++++ poetry.lock | 56 +++++- pyproject.toml | 1 + 4 files changed, 289 insertions(+), 1 deletion(-) create mode 100644 motleycrew/storage/__init__.py create mode 100644 motleycrew/storage/kuzu_graph_store.py diff --git a/motleycrew/storage/__init__.py b/motleycrew/storage/__init__.py new file mode 100644 index 00000000..1c29f4b6 --- /dev/null +++ b/motleycrew/storage/__init__.py @@ -0,0 +1 @@ +from kuzu_graph_store import MotleyKuzuGraphStore diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py new file mode 100644 index 00000000..8d29f646 --- /dev/null +++ b/motleycrew/storage/kuzu_graph_store.py @@ -0,0 +1,232 @@ +""" +Code derived from: https://github.com/run-llama/llama_index/blob/802064aee72b03ab38ead0cda780cfa3e37ce728/llama-index-integrations/graph_stores/llama-index-graph-stores-kuzu/llama_index/graph_stores/kuzu/base.py +Kùzu graph store index. +""" + +from typing import Any, Dict, List, Optional + +from llama_index.core.graph_stores.types import GraphStore + +import kuzu + + +class MotleyKuzuGraphStore(GraphStore): + def __init__( + self, + database: Any, + node_table_name: str = "entity", + rel_table_name: str = "links", + **kwargs: Any, + ) -> None: + self.database = database + self.connection = kuzu.Connection(database) + self.node_table_name = node_table_name + self.rel_table_name = rel_table_name + self.init_schema() + + def init_schema(self) -> None: + """Initialize schema if the tables do not exist.""" + node_tables = self.connection._get_node_table_names() + if self.node_table_name not in node_tables: + self.connection.execute( + "CREATE NODE TABLE %s (ID STRING, PRIMARY KEY(ID))" + % self.node_table_name + ) + rel_tables = self.connection._get_rel_table_names() + rel_tables = [rel_table["name"] for rel_table in rel_tables] + if self.rel_table_name not in rel_tables: + self.connection.execute( + "CREATE REL TABLE {} (FROM {} TO {}, predicate STRING)".format( + self.rel_table_name, self.node_table_name, self.node_table_name + ) + ) + + @property + def client(self) -> Any: + return self.connection + + def get(self, subj: str) -> List[List[str]]: + """Get triplets.""" + query = """ + MATCH (n1:%s)-[r:%s]->(n2:%s) + WHERE n1.ID = $subj + RETURN r.predicate, n2.ID; + """ + prepared_statement = self.connection.prepare( + query % (self.node_table_name, self.rel_table_name, self.node_table_name) + ) + query_result = self.connection.execute(prepared_statement, {"subj": subj}) + retval = [] + while query_result.has_next(): + row = query_result.get_next() + retval.append([row[0], row[1]]) + return retval + + def get_rel_map( + self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30 + ) -> Dict[str, List[List[str]]]: + """Get depth-aware rel map.""" + rel_wildcard = "r:%s*1..%d" % (self.rel_table_name, depth) + match_clause = "MATCH 
(n1:{})-[{}]->(n2:{})".format( + self.node_table_name, + rel_wildcard, + self.node_table_name, + ) + return_clause = "RETURN n1, r, n2 LIMIT %d" % limit + params = [] + if subjs is not None: + for i, curr_subj in enumerate(subjs): + if i == 0: + where_clause = "WHERE n1.ID = $%d" % i + else: + where_clause += " OR n1.ID = $%d" % i + params.append((str(i), curr_subj)) + else: + where_clause = "" + query = f"{match_clause} {where_clause} {return_clause}" + prepared_statement = self.connection.prepare(query) + if subjs is not None: + query_result = self.connection.execute( + prepared_statement, {k: v for k, v in params} + ) + else: + query_result = self.connection.execute(prepared_statement) + retval: Dict[str, List[List[str]]] = {} + while query_result.has_next(): + row = query_result.get_next() + curr_path = [] + subj = row[0] + recursive_rel = row[1] + obj = row[2] + nodes_map = {} + nodes_map[(subj["_id"]["table"], subj["_id"]["offset"])] = subj["ID"] + nodes_map[(obj["_id"]["table"], obj["_id"]["offset"])] = obj["ID"] + for node in recursive_rel["_nodes"]: + nodes_map[(node["_id"]["table"], node["_id"]["offset"])] = node["ID"] + for rel in recursive_rel["_rels"]: + predicate = rel["predicate"] + curr_subj_id = nodes_map[(rel["_src"]["table"], rel["_src"]["offset"])] + curr_path.append(curr_subj_id) + curr_path.append(predicate) + # Add the last node + curr_path.append(obj["ID"]) + if subj["ID"] not in retval: + retval[subj["ID"]] = [] + retval[subj["ID"]].append(curr_path) + return retval + + def upsert_triplet(self, subj: str, rel: str, obj: str) -> None: + """Add triplet.""" + + def check_entity_exists(connection: Any, entity: str) -> bool: + is_exists_result = connection.execute( + "MATCH (n:%s) WHERE n.ID = $entity RETURN n.ID" % self.node_table_name, + {"entity": entity}, + ) + return is_exists_result.has_next() + + def create_entity(connection: Any, entity: str) -> None: + connection.execute( + "CREATE (n:%s {ID: $entity})" % self.node_table_name, + {"entity": entity}, + ) + + def check_rel_exists(connection: Any, subj: str, obj: str, rel: str) -> bool: + is_exists_result = connection.execute( + ( + "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $subj AND n2.ID = " + "$obj AND r.predicate = $pred RETURN r.predicate" + ).format( + self.node_table_name, self.rel_table_name, self.node_table_name + ), + {"subj": subj, "obj": obj, "pred": rel}, + ) + return is_exists_result.has_next() + + def create_rel(connection: Any, subj: str, obj: str, rel: str) -> None: + connection.execute( + ( + "MATCH (n1:{}), (n2:{}) WHERE n1.ID = $subj AND n2.ID = $obj " + "CREATE (n1)-[r:{} {{predicate: $pred}}]->(n2)" + ).format( + self.node_table_name, self.node_table_name, self.rel_table_name + ), + {"subj": subj, "obj": obj, "pred": rel}, + ) + + is_subj_exists = check_entity_exists(self.connection, subj) + is_obj_exists = check_entity_exists(self.connection, obj) + + if not is_subj_exists: + create_entity(self.connection, subj) + if not is_obj_exists: + create_entity(self.connection, obj) + + if is_subj_exists and is_obj_exists: + is_rel_exists = check_rel_exists(self.connection, subj, obj, rel) + if is_rel_exists: + return + + create_rel(self.connection, subj, obj, rel) + + def delete(self, subj: str, rel: str, obj: str) -> None: + """Delete triplet.""" + + def delete_rel(connection: Any, subj: str, obj: str, rel: str) -> None: + connection.execute( + ( + "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $subj AND n2.ID" + " = $obj AND r.predicate = $pred DELETE r" + ).format( + self.node_table_name, 
self.rel_table_name, self.node_table_name + ), + {"subj": subj, "obj": obj, "pred": rel}, + ) + + def delete_entity(connection: Any, entity: str) -> None: + connection.execute( + "MATCH (n:%s) WHERE n.ID = $entity DELETE n" % self.node_table_name, + {"entity": entity}, + ) + + def check_edges(connection: Any, entity: str) -> bool: + is_exists_result = connection.execute( + "MATCH (n1:{})-[r:{}]-(n2:{}) WHERE n2.ID = $entity RETURN r.predicate".format( + self.node_table_name, self.rel_table_name, self.node_table_name + ), + {"entity": entity}, + ) + return is_exists_result.has_next() + + delete_rel(self.connection, subj, obj, rel) + if not check_edges(self.connection, subj): + delete_entity(self.connection, subj) + if not check_edges(self.connection, obj): + delete_entity(self.connection, obj) + + @classmethod + def from_persist_dir( + cls, + persist_dir: str, + node_table_name: str = "entity", + rel_table_name: str = "links", + ) -> "MotleyKuzuGraphStore": + """Load from persist dir.""" + try: + import kuzu + except ImportError: + raise ImportError("Please install kuzu: pip install kuzu") + database = kuzu.Database(persist_dir) + return cls(database, node_table_name, rel_table_name) + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore": + """Initialize graph store from configuration dictionary. + + Args: + config_dict: Configuration dictionary. + + Returns: + Graph store. + """ + return cls(**config_dict) diff --git a/poetry.lock b/poetry.lock index d5f248a1..433afd5b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1059,6 +1059,45 @@ files = [ {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] +[[package]] +name = "kuzu" +version = "0.1.0" +description = "An in-process property graph database management system built for query speed and scalability." 
+optional = false +python-versions = "*" +files = [ + {file = "kuzu-0.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:f60ca54ef3c8627665e376ffa0f044ee922f4cac2b32ed0bb2d20cd1589993be"}, + {file = "kuzu-0.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b9302b2b12aa504e22be8f71f58a44594f4609027b791db3b58f27d3ec7eea"}, + {file = "kuzu-0.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a817b73edfb79382cef15db5b9cdd57cef9a95f398811ad5d7ae6a4329ff9613"}, + {file = "kuzu-0.1.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45605768da01d9c4d9047379905b8dd83d0be30fb0bd8c49ab862c84685ea7e3"}, + {file = "kuzu-0.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e410b9c7785db065b58707ede9df2f611e15bd3da4d3f4ac8f28e45f651a030"}, + {file = "kuzu-0.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:16543b69ddb7171c723cd77df38d338d6c5607bcfaee700c71abb686293f0346"}, + {file = "kuzu-0.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b4492a904c7345428fc2493583dca1f4f5ee7cdb590006e4648b613ee8fb8b1"}, + {file = "kuzu-0.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8f459426c7a92887a0ee235b87295fdab6ab70e4e1d52647f3b31750784d699"}, + {file = "kuzu-0.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ceebcae55ea47867a6a80c16103164167c4a8fa670184168f0cf91dd82337a"}, + {file = "kuzu-0.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:c24fd6b9633081a79ceedb757a33f028ece31b9d691414db8f97e1be67739462"}, + {file = "kuzu-0.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:a0b64068983c289dee9fc624dde4b697e0307bd6c3e90ffcdc8d7d61c4fa450e"}, + {file = "kuzu-0.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9bea48a3db41a75413b531caa8fd7eb0bbf67357181cebac806d9c188550d1c"}, + {file = "kuzu-0.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ac9171197c0d6ff8b5adc30043c0a3f012d2fc17529c7afcf9c5659db4cf69"}, + {file = "kuzu-0.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec0557cda5735ba03bf28fe766180f3fa92319e3e1611e744248f4ed28357423"}, + {file = "kuzu-0.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:229e1902d1c82faef0fc07dc617ec82d927131dd519a4908f61cfce0f452ba74"}, + {file = "kuzu-0.1.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:07b1eb930afe5efd392fc0ad5a58ea27216a464e95231661aacbd0013294cee0"}, + {file = "kuzu-0.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c954af85a5be173741322e239a4c1d8a274d0ff2d0a006386dba587aa3e64c8"}, + {file = "kuzu-0.1.0-cp37-cp37m-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4d761b610ba83448cc6ba51ddd376489d525dddcf8d74492de15cd472ecffd7c"}, + {file = "kuzu-0.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:cdf8dd2277e595719a6706e6340912323dc4db067c8169e24949f4746d467a96"}, + {file = "kuzu-0.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:bd0ab78ba72c8befb275d6ad55f19e738f9387f7540fa36faf413ecb76bb8563"}, + {file = "kuzu-0.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6b1fe702f671063c626bd1390cf20910b49893192ae77b303acd6b9f3f62f661"}, + {file = "kuzu-0.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b488f83e5fc4a1bf6a15ed70712e4c2b9c6296f25c4c2a9f8e8015e81f831151"}, + {file = "kuzu-0.1.0-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61ab2d6aa98bee1bf7c3f6893a1706a408531846d1de5b9624eb490ca2117b2c"}, + {file = 
"kuzu-0.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:80000e12a0703f5062ab3560e701a6a1b2e10992c31de6912f9e5291509b1659"}, + {file = "kuzu-0.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:967a99fb3cdea73c3bfa86c45a78ed896468318f6061e42c352e3df96b370e0f"}, + {file = "kuzu-0.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:65d72f536ed91aa114dae8400e5cdd22ba05540782de39462a7da550ed3d438a"}, + {file = "kuzu-0.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74a5eb43e9ae5b1e4ca3cf61cd845c3e300dc737284fda384441af01ad05bd54"}, + {file = "kuzu-0.1.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e78bd924cdcace890e7ed25917d8221b941f239a05391c5d2a08c70b516096e"}, + {file = "kuzu-0.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:37432b996f706354b6c469b6ea1bdd9d7fba4d61256f041e723fc8eefbedcadd"}, + {file = "kuzu-0.1.0.tar.gz", hash = "sha256:12883c9f7d1621c8f4ab50b85ff89d011aef45f7f1328a2c1c6417d5e1456cae"}, +] + [[package]] name = "langchain" version = "0.1.16" @@ -1346,6 +1385,21 @@ files = [ [package.dependencies] llama-index-core = ">=0.10.1,<0.11.0" +[[package]] +name = "llama-index-graph-stores-kuzu" +version = "0.1.2" +description = "llama-index graph stores kuzu integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_graph_stores_kuzu-0.1.2-py3-none-any.whl", hash = "sha256:eba188bc6afdc246cfd15114359fb8d2db4a34e88e3e96385a383d4fbc7a1dcd"}, + {file = "llama_index_graph_stores_kuzu-0.1.2.tar.gz", hash = "sha256:0458860bde83d28cdd760a0cb5502cb0b191cae5896d814c1c420dcb99dbb8fd"}, +] + +[package.dependencies] +kuzu = ">=0.1.0,<0.2.0" +llama-index-core = ">=0.10.1,<0.11.0" + [[package]] name = "llama-index-indices-managed-llama-cloud" version = "0.1.5" @@ -3274,4 +3328,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.10,<=3.13" -content-hash = "2b4c279975a9bf627bdd621f23b95cc401ee4bfa70be9acb7e1adea510e535e1" +content-hash = "a9a8a5a94aa310dc866ec41a0fba6c0ff748d27d71cd38d5b5012d9f247a2357" diff --git a/pyproject.toml b/pyproject.toml index edbbe692..24be4bbb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ langchain-experimental = "^0.0.57" python-dotenv = "^1.0.0" lunary = "^1.0.3" langchainhub = "^0.1.15" +llama-index-graph-stores-kuzu = "^0.1.2" [tool.poetry.group.dev.dependencies] black = "^24.2.0" From 4df5afcc8ffbf21a2117fd2c5441ed1d6d4b3076 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Thu, 25 Apr 2024 13:27:30 +0200 Subject: [PATCH 04/20] First cut of question insertion tool --- motleycrew/tool/question_insertion_tool.py | 74 ++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 motleycrew/tool/question_insertion_tool.py diff --git a/motleycrew/tool/question_insertion_tool.py b/motleycrew/tool/question_insertion_tool.py new file mode 100644 index 00000000..9c737f1a --- /dev/null +++ b/motleycrew/tool/question_insertion_tool.py @@ -0,0 +1,74 @@ +from typing import List + +from pathlib import Path + +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.tools import Tool + +# TODO: fallback interface if LlamaIndex is not available +from llama_index.core.graph_stores.types import GraphStore + +from motleycrew.tool import MotleyTool + + +class QuestionInsertionTool(MotleyTool): + def __init__(self, node_id: str, graph: GraphStore): + + langchain_tool = create_question_insertion_langchain_tool( + name="Question Insertion Tool", + 
description="Insert a list of questions (supplied as a list of strings) into the graph.", + node_id=node_id, + graph=graph, + ) + + super().__init__(langchain_tool) + + +class QuestionInsertionToolInput(BaseModel): + """Subquestions of the current question, to be inserted into the knowledge graph.""" + + questions: List[str] = Field( + description="List of questions to be inserted into the knowledge graph." + ) + + +def create_question_insertion_langchain_tool( + name: str, + description: str, + node_id: str, + graph: GraphStore, +): + def insert_questions(questions: list[str]) -> None: + for question in questions: + # TODO: change! This is a placeholder implementation + graph.upsert_triplet(node_id, "IS_SUBQUESTION", question) + + return Tool.from_function( + func=insert_questions, + name=name, + description=description, + args_schema=QuestionInsertionToolInput, + ) + + +if __name__ == "__main__": + import kuzu + from llama_index.graph_stores.kuzu import KuzuGraphStore + + here = Path(__file__).parent + db_path = here / "test1" + db = kuzu.Database(db_path) + graph_store = KuzuGraphStore(db) + + children_1 = ["What is the capital of France?", "What is the capital of Germany?"] + children_2 = ["What is the capital of Italy?", "What is the capital of Spain?"] + tool = QuestionInsertionTool(node_id="Starting question", graph=graph_store) + tool.invoke({"questions": children_1}) + tool2 = QuestionInsertionTool( + node_id="What is the capital of France?", graph=graph_store + ) + tool2.invoke({"questions": children_2}) + print( + f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest" + ) + print("MATCH (A)-[r]->(B) RETURN *;") From dc0c4c9eee3f6bd73bd6584339e53658072a7669 Mon Sep 17 00:00:00 2001 From: whimo Date: Thu, 25 Apr 2024 17:11:07 +0400 Subject: [PATCH 05/20] Graph store implementation draft --- motleycrew/storage/__init__.py | 2 +- motleycrew/storage/kuzu_graph_store.py | 238 +++++++++++-------------- 2 files changed, 104 insertions(+), 136 deletions(-) diff --git a/motleycrew/storage/__init__.py b/motleycrew/storage/__init__.py index 1c29f4b6..096759ad 100644 --- a/motleycrew/storage/__init__.py +++ b/motleycrew/storage/__init__.py @@ -1 +1 @@ -from kuzu_graph_store import MotleyKuzuGraphStore +from .kuzu_graph_store import MotleyQuestionGraphStore diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py index 8d29f646..3e0f7a24 100644 --- a/motleycrew/storage/kuzu_graph_store.py +++ b/motleycrew/storage/kuzu_graph_store.py @@ -10,11 +10,13 @@ import kuzu -class MotleyKuzuGraphStore(GraphStore): +class MotleyQuestionGraphStore(GraphStore): + IS_SUBQUESTION_PREDICATE = "IS_SUBQUESTION" + def __init__( self, database: Any, - node_table_name: str = "entity", + node_table_name: str = "question", rel_table_name: str = "links", **kwargs: Any, ) -> None: @@ -29,7 +31,7 @@ def init_schema(self) -> None: node_tables = self.connection._get_node_table_names() if self.node_table_name not in node_tables: self.connection.execute( - "CREATE NODE TABLE %s (ID STRING, PRIMARY KEY(ID))" + "CREATE NODE TABLE %s (ID SERIAL, question STRING, answer STRING, context STRING[], PRIMARY KEY(ID))" % self.node_table_name ) rel_tables = self.connection._get_rel_table_names() @@ -45,164 +47,103 @@ def init_schema(self) -> None: def client(self) -> Any: return self.connection - def get(self, subj: str) -> List[List[str]]: - """Get triplets.""" + def check_question_exists(self, question_id: int) -> bool: + is_exists_result = self.connection.execute( + 
"MATCH (n:%s) WHERE n.ID = $question_id RETURN n.ID" % self.node_table_name, + {"question_id": question_id}, + ) + return is_exists_result.has_next() + + def get_question(self, question_id: int) -> Optional[dict]: query = """ - MATCH (n1:%s)-[r:%s]->(n2:%s) - WHERE n1.ID = $subj - RETURN r.predicate, n2.ID; + MATCH (n1:%s) + WHERE n1.ID = $question_id + RETURN n1; """ + prepared_statement = self.connection.prepare(query % self.node_table_name) + query_result = self.connection.execute(prepared_statement, {"question_id": question_id}) + + if query_result.has_next(): + row = query_result.get_next() + return row[0] + + def get_subquestions(self, question_id: int) -> List[int]: + query = """ + MATCH (n1:%s)-[r:%s]->(n2:%s) + WHERE n1.ID = $question_id + AND r.predicate = $is_subquestion_predicate + RETURN n2.ID; + """ prepared_statement = self.connection.prepare( query % (self.node_table_name, self.rel_table_name, self.node_table_name) ) - query_result = self.connection.execute(prepared_statement, {"subj": subj}) + query_result = self.connection.execute( + prepared_statement, + { + "question_id": question_id, + "is_subquestion_predicate": MotleyQuestionGraphStore.IS_SUBQUESTION_PREDICATE, + }, + ) retval = [] while query_result.has_next(): row = query_result.get_next() - retval.append([row[0], row[1]]) + retval.append(row[0]) return retval - def get_rel_map( - self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30 - ) -> Dict[str, List[List[str]]]: - """Get depth-aware rel map.""" - rel_wildcard = "r:%s*1..%d" % (self.rel_table_name, depth) - match_clause = "MATCH (n1:{})-[{}]->(n2:{})".format( - self.node_table_name, - rel_wildcard, - self.node_table_name, + def create_question(self, question: str) -> int: + create_result = self.connection.execute( + "CREATE (n:%s {question: $question}) " "RETURN n.ID" % self.node_table_name, + {"question": question}, ) - return_clause = "RETURN n1, r, n2 LIMIT %d" % limit - params = [] - if subjs is not None: - for i, curr_subj in enumerate(subjs): - if i == 0: - where_clause = "WHERE n1.ID = $%d" % i - else: - where_clause += " OR n1.ID = $%d" % i - params.append((str(i), curr_subj)) - else: - where_clause = "" - query = f"{match_clause} {where_clause} {return_clause}" - prepared_statement = self.connection.prepare(query) - if subjs is not None: - query_result = self.connection.execute( - prepared_statement, {k: v for k, v in params} - ) - else: - query_result = self.connection.execute(prepared_statement) - retval: Dict[str, List[List[str]]] = {} - while query_result.has_next(): - row = query_result.get_next() - curr_path = [] - subj = row[0] - recursive_rel = row[1] - obj = row[2] - nodes_map = {} - nodes_map[(subj["_id"]["table"], subj["_id"]["offset"])] = subj["ID"] - nodes_map[(obj["_id"]["table"], obj["_id"]["offset"])] = obj["ID"] - for node in recursive_rel["_nodes"]: - nodes_map[(node["_id"]["table"], node["_id"]["offset"])] = node["ID"] - for rel in recursive_rel["_rels"]: - predicate = rel["predicate"] - curr_subj_id = nodes_map[(rel["_src"]["table"], rel["_src"]["offset"])] - curr_path.append(curr_subj_id) - curr_path.append(predicate) - # Add the last node - curr_path.append(obj["ID"]) - if subj["ID"] not in retval: - retval[subj["ID"]] = [] - retval[subj["ID"]].append(curr_path) - return retval - - def upsert_triplet(self, subj: str, rel: str, obj: str) -> None: - """Add triplet.""" - - def check_entity_exists(connection: Any, entity: str) -> bool: - is_exists_result = connection.execute( - "MATCH (n:%s) WHERE n.ID = $entity 
RETURN n.ID" % self.node_table_name, - {"entity": entity}, - ) - return is_exists_result.has_next() - - def create_entity(connection: Any, entity: str) -> None: - connection.execute( - "CREATE (n:%s {ID: $entity})" % self.node_table_name, - {"entity": entity}, - ) - - def check_rel_exists(connection: Any, subj: str, obj: str, rel: str) -> bool: - is_exists_result = connection.execute( - ( - "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $subj AND n2.ID = " - "$obj AND r.predicate = $pred RETURN r.predicate" - ).format( - self.node_table_name, self.rel_table_name, self.node_table_name - ), - {"subj": subj, "obj": obj, "pred": rel}, - ) - return is_exists_result.has_next() + assert create_result.has_next() + return create_result.get_next()[0] - def create_rel(connection: Any, subj: str, obj: str, rel: str) -> None: + def create_subquestion(self, question_id: int, subquestion: str) -> int: + def create_subquestion_rel(connection: Any, question_id: int, subquestion_id: int) -> None: connection.execute( ( - "MATCH (n1:{}), (n2:{}) WHERE n1.ID = $subj AND n2.ID = $obj " - "CREATE (n1)-[r:{} {{predicate: $pred}}]->(n2)" - ).format( - self.node_table_name, self.node_table_name, self.rel_table_name - ), - {"subj": subj, "obj": obj, "pred": rel}, + "MATCH (n1:{}), (n2:{}) WHERE n1.ID = $question_id AND n2.ID = $subquestion_id " + "CREATE (n1)-[r:{} {{predicate: $is_subquestion_predicate}}]->(n2)" + ).format(self.node_table_name, self.node_table_name, self.rel_table_name), + { + "question_id": question_id, + "subquestion_id": subquestion_id, + "is_subquestion_predicate": MotleyQuestionGraphStore.IS_SUBQUESTION_PREDICATE, + }, ) - is_subj_exists = check_entity_exists(self.connection, subj) - is_obj_exists = check_entity_exists(self.connection, obj) - - if not is_subj_exists: - create_entity(self.connection, subj) - if not is_obj_exists: - create_entity(self.connection, obj) - - if is_subj_exists and is_obj_exists: - is_rel_exists = check_rel_exists(self.connection, subj, obj, rel) - if is_rel_exists: - return + if not self.check_question_exists(question_id): + raise Exception(f"No question with id {question_id}") - create_rel(self.connection, subj, obj, rel) + subquestion_id = self.create_question(subquestion) + create_subquestion_rel(self.connection, question_id=question_id, subquestion_id=subquestion_id) + return subquestion_id - def delete(self, subj: str, rel: str, obj: str) -> None: - """Delete triplet.""" + def delete_question(self, question_id: int) -> None: + """Deletes question and its relations.""" - def delete_rel(connection: Any, subj: str, obj: str, rel: str) -> None: + def delete_rels(connection: Any, question_id: int) -> None: connection.execute( - ( - "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $subj AND n2.ID" - " = $obj AND r.predicate = $pred DELETE r" - ).format( + "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $question_id DELETE r".format( self.node_table_name, self.rel_table_name, self.node_table_name ), - {"subj": subj, "obj": obj, "pred": rel}, + {"question_id": question_id}, ) - - def delete_entity(connection: Any, entity: str) -> None: connection.execute( - "MATCH (n:%s) WHERE n.ID = $entity DELETE n" % self.node_table_name, - {"entity": entity}, - ) - - def check_edges(connection: Any, entity: str) -> bool: - is_exists_result = connection.execute( - "MATCH (n1:{})-[r:{}]-(n2:{}) WHERE n2.ID = $entity RETURN r.predicate".format( + "MATCH (n1:{})<-[r:{}]-(n2:{}) WHERE n1.ID = $question_id DELETE r".format( self.node_table_name, self.rel_table_name, self.node_table_name ), - 
{"entity": entity}, + {"question_id": question_id}, ) - return is_exists_result.has_next() - delete_rel(self.connection, subj, obj, rel) - if not check_edges(self.connection, subj): - delete_entity(self.connection, subj) - if not check_edges(self.connection, obj): - delete_entity(self.connection, obj) + def delete_question(connection: Any, question_id: int) -> None: + connection.execute( + "MATCH (n:%s) WHERE n.ID = $question_id DELETE n" % self.node_table_name, + {"question_id": question_id}, + ) + + delete_rels(self.connection, question_id) + delete_question(self.connection, question_id) @classmethod def from_persist_dir( @@ -210,7 +151,7 @@ def from_persist_dir( persist_dir: str, node_table_name: str = "entity", rel_table_name: str = "links", - ) -> "MotleyKuzuGraphStore": + ) -> "MotleyQuestionGraphStore": """Load from persist dir.""" try: import kuzu @@ -220,7 +161,7 @@ def from_persist_dir( return cls(database, node_table_name, rel_table_name) @classmethod - def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore": + def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyQuestionGraphStore": """Initialize graph store from configuration dictionary. Args: @@ -230,3 +171,30 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore": Graph store. """ return cls(**config_dict) + + +if __name__ == "__main__": + from pathlib import Path + import kuzu + + here = Path(__file__).parent + db_path = here / "test1" + db = kuzu.Database(str(db_path)) + graph_store = MotleyQuestionGraphStore(db) + + q1_id = graph_store.create_question("q1") + assert graph_store.get_question(q1_id)["question"] == "q1" + + q2_id = graph_store.create_subquestion(q1_id, "q2") + q3_id = graph_store.create_subquestion(q1_id, "q3") + q4_id = graph_store.create_subquestion(q3_id, "q4") + + assert set(graph_store.get_subquestions(q1_id)) == {q2_id, q3_id} + assert set(graph_store.get_subquestions(q3_id)) == {q4_id} + + graph_store.delete_question(q4_id) + assert graph_store.get_question(q4_id) is None + assert not graph_store.get_subquestions(q3_id) + + print(f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest") + print("MATCH (A)-[r]->(B) RETURN *;") From e6ff0a14ecbaa3b352ed9a6f9a3654e30b5d2f53 Mon Sep 17 00:00:00 2001 From: whimo Date: Thu, 25 Apr 2024 17:12:29 +0400 Subject: [PATCH 06/20] Remove redundant inheritance --- motleycrew/storage/kuzu_graph_store.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py index 3e0f7a24..e930c281 100644 --- a/motleycrew/storage/kuzu_graph_store.py +++ b/motleycrew/storage/kuzu_graph_store.py @@ -5,12 +5,10 @@ from typing import Any, Dict, List, Optional -from llama_index.core.graph_stores.types import GraphStore - import kuzu -class MotleyQuestionGraphStore(GraphStore): +class MotleyQuestionGraphStore: IS_SUBQUESTION_PREDICATE = "IS_SUBQUESTION" def __init__( @@ -175,7 +173,6 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyQuestionGraphStore": if __name__ == "__main__": from pathlib import Path - import kuzu here = Path(__file__).parent db_path = here / "test1" From 5efcf7ca325a07692d92711923395309a6e07c91 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 10:05:19 +0200 Subject: [PATCH 07/20] Question generator and inserter tools --- examples/blog_post/blog_post.py | 10 +- .../agent/langchain/openai_tools_react.py | 10 +- .../agent/research/question_generator.py | 172 ++++++++++++++++++ 
motleycrew/common/utils.py | 4 + motleycrew/tool/question_insertion_tool.py | 14 +- 5 files changed, 194 insertions(+), 16 deletions(-) create mode 100644 motleycrew/agent/research/question_generator.py diff --git a/examples/blog_post/blog_post.py b/examples/blog_post/blog_post.py index 2435d51e..bdfc6efb 100644 --- a/examples/blog_post/blog_post.py +++ b/examples/blog_post/blog_post.py @@ -2,7 +2,7 @@ from dotenv import load_dotenv - +from llama_index.graph_stores.kuzu import KuzuGraphStore from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage from langchain_core.prompts.chat import ChatPromptTemplate from motleycrew.agent.langchain.react import ReactMotleyAgent @@ -58,7 +58,8 @@ content="""Your task is to specify the illustrations that would fit this story. Make sure the illustrations are varied in style, eye-catching, and some of them humorous. Describe each illustration in a way suitable for entering in a Midjourney prompt. - Each description should be detailed and verbose. Don't explain the purpose of the illustrations, just describe in great + Each description should be detailed and verbose. Don't explain the purpose of the illustrations, + just describe in great detail what each illustration should show, in a way suitable for a generative image prompt. There should be at most 5 and at least 3 illustrations. Return the illustration descriptions as a list in the format @@ -106,7 +107,8 @@ writer = ReactMotleyAgent( - prompt="You are a professional freelance copywriter with 10 years of experience." + prompt="You are a professional freelance copywriter with 10 years of experience.", + tools=[editor, illustrator, seo_expert], ) # Create tasks for your agents @@ -132,3 +134,5 @@ Information begins: {text} """, agent=writer, ) + +crew.run(verbose=2) diff --git a/motleycrew/agent/langchain/openai_tools_react.py b/motleycrew/agent/langchain/openai_tools_react.py index 9aec9bba..0c7d51db 100644 --- a/motleycrew/agent/langchain/openai_tools_react.py +++ b/motleycrew/agent/langchain/openai_tools_react.py @@ -9,13 +9,15 @@ from langchain.tools.render import render_text_description from langchain_core.agents import AgentFinish, AgentActionMessageLog -from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages +from langchain.agents.format_scratchpad.openai_tools import ( + format_to_openai_tool_messages, +) from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser from motleycrew.agent.parent import MotleyAgentAbstractParent from motleycrew.agent.langchain.langchain import LangchainMotleyAgentParent from motleycrew.common import MotleySupportedTool - +from motleycrew.common.utils import print_passthrough default_think_prompt = ChatPromptTemplate.from_template( """ @@ -67,10 +69,6 @@ ) -def print_passthrough(x): - return x - - def add_thought_to_background(x: dict): out = x["background"] out["agent_scratchpad"] += [x["thought"]] diff --git a/motleycrew/agent/research/question_generator.py b/motleycrew/agent/research/question_generator.py new file mode 100644 index 00000000..9a2223d1 --- /dev/null +++ b/motleycrew/agent/research/question_generator.py @@ -0,0 +1,172 @@ +from typing import List, Optional, Dict, Any +import json +from pathlib import Path + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.runnables import ( + RunnablePassthrough, + RunnableLambda, + RunnableParallel, +) +from langchain_core.tools import Tool +from langchain_core.prompts.base import BasePromptTemplate 
+from langchain_core.prompts import PromptTemplate + +from langchain_core.pydantic_v1 import BaseModel, Field + +# TODO: fallback interface if LlamaIndex is not available +from llama_index.core.graph_stores.types import GraphStore + +from motleycrew.tool import MotleyTool +from motleycrew.common import LLMFramework +from motleycrew.common.llms import init_llm +from motleycrew.tool.question_insertion_tool import QuestionInsertionTool +from motleycrew.common.utils import print_passthrough + +default_prompt = PromptTemplate.from_template( + """ +You are a part of a team. The ultimate goal of your team is to +answer the following Question: '{question}'.\n +Your team has discovered some new text (delimited by ```) that may be relevant to your ultimate goal. +text: \n ``` {context} ``` \n +Your task is to ask new questions that may help your team achieve the ultimate goal. +If you think that the text is relevant to your ultimate goal, then ask new questions. +New questions should be based only on the text and the goal Question and no other previous knowledge. + +You can ask up to {num_questions} new questions. +Return the questions as a json list of strings, don't return anything else +except this valid json list of strings. +""" +) + +# " The new questions should have no semantic overlap with questions in the following list:\n" +# " {previous_questions}\n" + + +class QuestionGeneratorTool(MotleyTool): + """ + Gets a question as input + Retrieves relevant docs (llama index basic RAG) + (Retrieves existing questions from graph (to avoid overlap)) + Generates extra questions (research agent prompt) + + Adds questions as children of current q by calling Q insertion tool once + exits + """ + + def __init__( + self, + query_tool: MotleyTool, + graph: GraphStore, + max_questions: int = 3, + llm: Optional[BaseLanguageModel] = None, + prompt: str | BasePromptTemplate = None, + ): + langchain_tool = create_question_generator_langchain_tool( + query_tool=query_tool, + graph=graph, + max_questions=max_questions, + llm=llm, + prompt=prompt, + ) + + super().__init__(langchain_tool) + + +class QuestionGeneratorToolInput(BaseModel): + """Input for the Question Generator Tool.""" + + question: str = Field( + description="The input question for which to generate subquestions." 
+ ) + + +def create_question_generator_langchain_tool( + query_tool: MotleyTool, + graph: GraphStore, + max_questions: int = 3, + llm: Optional[BaseLanguageModel] = None, + prompt: str | BasePromptTemplate = None, +): + if llm is None: + llm = init_llm(llm_framework=LLMFramework.LANGCHAIN) + + llm.bind(json_mode=True) + + if prompt is None: + prompt = default_prompt + elif isinstance(prompt, str): + prompt = PromptTemplate.from_template(prompt) + + assert isinstance( + prompt, BasePromptTemplate + ), "Prompt must be a string or a BasePromptTemplate" + + def partial_inserter(question: dict[str, str]): + out = QuestionInsertionTool( + graph=graph, question=question["question"] + ).to_langchain_tool() + return (out,) + + def insert_questions(input_dict) -> None: + inserter = input_dict["question_inserter"]["question_inserter"][0] + questions = json.loads(input_dict["subquestions"].content) + inserter.invoke({"questions": questions}) + + print("yay!") + + # TODO: add context to question node + pipeline = ( + { + "question": RunnablePassthrough(), + "context": query_tool.to_langchain_tool(), + "question_inserter": RunnableLambda(partial_inserter), + } + | RunnableLambda(print_passthrough) + | { + "subquestions": prompt.partial(num_questions=max_questions) | llm, + "question_inserter": RunnablePassthrough(), + } + | RunnableLambda(insert_questions) + ) + + return Tool.from_function( + func=lambda question: pipeline.invoke({"question": question}), + name="Question Generator Tool", + description="""Generate a list of questions based on the input question, + and insert them into the knowledge graph.""", + args_schema=QuestionGeneratorToolInput, + ) + + +if __name__ == "__main__": + import kuzu + from llama_index.graph_stores.kuzu import KuzuGraphStore + + here = Path(__file__).parent + db_path = str(here / "test2") + + db = kuzu.Database(db_path) + graph_store = KuzuGraphStore(db) + + query_tool = MotleyTool.from_langchain_tool( + Tool.from_function( + func=lambda question: [ + "Germany has consisted of many different states over the years", + "The capital of France has moved in 1815, from Lyons to Paris", + "France actually has two capitals, one in the north and one in the south", + ], + name="Query Tool", + description="Query the library for relevant information.", + args_schema=QuestionGeneratorToolInput, + ) + ) + + tool = QuestionGeneratorTool( + query_tool=query_tool, + graph=graph_store, + max_questions=3, + ) + + tool.invoke({"question": "What is the capital of France?"}) + print("Done!") diff --git a/motleycrew/common/utils.py b/motleycrew/common/utils.py index 0eacb220..36b7b13e 100644 --- a/motleycrew/common/utils.py +++ b/motleycrew/common/utils.py @@ -40,3 +40,7 @@ def generate_hex_hash(data: str, length: Optional[int] = None): if length is not None: hex_hash = hex_hash[:length] return hex_hash + + +def print_passthrough(x): + return x diff --git a/motleycrew/tool/question_insertion_tool.py b/motleycrew/tool/question_insertion_tool.py index 9c737f1a..3d2f57f2 100644 --- a/motleycrew/tool/question_insertion_tool.py +++ b/motleycrew/tool/question_insertion_tool.py @@ -12,12 +12,12 @@ class QuestionInsertionTool(MotleyTool): - def __init__(self, node_id: str, graph: GraphStore): + def __init__(self, question: str, graph: GraphStore): langchain_tool = create_question_insertion_langchain_tool( name="Question Insertion Tool", description="Insert a list of questions (supplied as a list of strings) into the graph.", - node_id=node_id, + question=question, graph=graph, ) @@ -35,13 +35,13 @@ 
class QuestionInsertionToolInput(BaseModel): def create_question_insertion_langchain_tool( name: str, description: str, - node_id: str, + question: str, graph: GraphStore, ): def insert_questions(questions: list[str]) -> None: - for question in questions: + for subquestion in questions: # TODO: change! This is a placeholder implementation - graph.upsert_triplet(node_id, "IS_SUBQUESTION", question) + graph.upsert_triplet(question, "IS_SUBQUESTION", subquestion) return Tool.from_function( func=insert_questions, @@ -62,10 +62,10 @@ def insert_questions(questions: list[str]) -> None: children_1 = ["What is the capital of France?", "What is the capital of Germany?"] children_2 = ["What is the capital of Italy?", "What is the capital of Spain?"] - tool = QuestionInsertionTool(node_id="Starting question", graph=graph_store) + tool = QuestionInsertionTool(question="Starting question", graph=graph_store) tool.invoke({"questions": children_1}) tool2 = QuestionInsertionTool( - node_id="What is the capital of France?", graph=graph_store + question="What is the capital of France?", graph=graph_store ) tool2.invoke({"questions": children_2}) print( From bb691e8854b492b3a1d8fb95b220850ab1c46400 Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 26 Apr 2024 12:24:05 +0400 Subject: [PATCH 08/20] Research agent directory in examples --- .../research_agent}/question_generator.py | 0 motleycrew/tool/llm_tool.py | 13 +- poetry.lock | 580 +++++++++--------- pyproject.toml | 2 +- 4 files changed, 293 insertions(+), 302 deletions(-) rename {motleycrew/agent/research => examples/research_agent}/question_generator.py (100%) diff --git a/motleycrew/agent/research/question_generator.py b/examples/research_agent/question_generator.py similarity index 100% rename from motleycrew/agent/research/question_generator.py rename to examples/research_agent/question_generator.py diff --git a/motleycrew/tool/llm_tool.py b/motleycrew/tool/llm_tool.py index 9b35f2b4..1b024abd 100644 --- a/motleycrew/tool/llm_tool.py +++ b/motleycrew/tool/llm_tool.py @@ -18,8 +18,11 @@ def __init__( description: str, prompt: str | BasePromptTemplate, llm: Optional[BaseLanguageModel] = None, + input_description: Optional[str] = "Input for the tool.", ): - langchain_tool = create_llm_langchain_tool(name, description, prompt, llm) + langchain_tool = create_llm_langchain_tool( + name=name, description=description, prompt=prompt, llm=llm, input_description=input_description + ) super().__init__(langchain_tool) @@ -27,8 +30,8 @@ def create_llm_langchain_tool( name: str, description: str, prompt: str | BasePromptTemplate, - llm: Optional[BaseLanguageModel] = None, - input_description: Optional[str] = "Input for the tool.", + llm: Optional[BaseLanguageModel], + input_description: Optional[str], ): if llm is None: llm = init_llm(llm_framework=LLMFramework.LANGCHAIN) @@ -36,9 +39,7 @@ def create_llm_langchain_tool( if not isinstance(prompt, BasePromptTemplate): prompt = PromptTemplate.from_template(prompt) - assert ( - len(prompt.input_variables) == 1 - ), "Prompt must contain exactly one input variable" + assert len(prompt.input_variables) == 1, "Prompt must contain exactly one input variable" input_var = prompt.input_variables[0] class LLMToolInput(BaseModel): diff --git a/poetry.lock b/poetry.lock index 433afd5b..0bbd2913 100644 --- a/poetry.lock +++ b/poetry.lock @@ -196,33 +196,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "24.4.0" +version = "24.4.2" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-24.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6ad001a9ddd9b8dfd1b434d566be39b1cd502802c8d38bbb1ba612afda2ef436"}, - {file = "black-24.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3a3a092b8b756c643fe45f4624dbd5a389f770a4ac294cf4d0fce6af86addaf"}, - {file = "black-24.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae79397f367ac8d7adb6c779813328f6d690943f64b32983e896bcccd18cbad"}, - {file = "black-24.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:71d998b73c957444fb7c52096c3843875f4b6b47a54972598741fe9a7f737fcb"}, - {file = "black-24.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e5537f456a22cf5cfcb2707803431d2feeb82ab3748ade280d6ccd0b40ed2e8"}, - {file = "black-24.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64e60a7edd71fd542a10a9643bf369bfd2644de95ec71e86790b063aa02ff745"}, - {file = "black-24.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd5b4f76056cecce3e69b0d4c228326d2595f506797f40b9233424e2524c070"}, - {file = "black-24.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:64578cf99b6b46a6301bc28bdb89f9d6f9b592b1c5837818a177c98525dbe397"}, - {file = "black-24.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f95cece33329dc4aa3b0e1a771c41075812e46cf3d6e3f1dfe3d91ff09826ed2"}, - {file = "black-24.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4396ca365a4310beef84d446ca5016f671b10f07abdba3e4e4304218d2c71d33"}, - {file = "black-24.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d99dfdf37a2a00a6f7a8dcbd19edf361d056ee51093b2445de7ca09adac965"}, - {file = "black-24.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:21f9407063ec71c5580b8ad975653c66508d6a9f57bd008bb8691d273705adcd"}, - {file = "black-24.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:652e55bb722ca026299eb74e53880ee2315b181dfdd44dca98e43448620ddec1"}, - {file = "black-24.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7f2966b9b2b3b7104fca9d75b2ee856fe3fdd7ed9e47c753a4bb1a675f2caab8"}, - {file = "black-24.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bb9ca06e556a09f7f7177bc7cb604e5ed2d2df1e9119e4f7d2f1f7071c32e5d"}, - {file = "black-24.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4e71cdebdc8efeb6deaf5f2deb28325f8614d48426bed118ecc2dcaefb9ebf3"}, - {file = "black-24.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6644f97a7ef6f401a150cca551a1ff97e03c25d8519ee0bbc9b0058772882665"}, - {file = "black-24.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75a2d0b4f5eb81f7eebc31f788f9830a6ce10a68c91fbe0fade34fff7a2836e6"}, - {file = "black-24.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb949f56a63c5e134dfdca12091e98ffb5fd446293ebae123d10fc1abad00b9e"}, - {file = "black-24.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:7852b05d02b5b9a8c893ab95863ef8986e4dda29af80bbbda94d7aee1abf8702"}, - {file = "black-24.4.0-py3-none-any.whl", hash = "sha256:74eb9b5420e26b42c00a3ff470dc0cd144b80a766128b1771d07643165e08d0e"}, - {file = "black-24.4.0.tar.gz", hash = "sha256:f07b69fda20578367eaebbd670ff8fc653ab181e1ff95d84497f9fa20e7d0641"}, + {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, + {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, + {file = 
"black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, + {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, + {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, + {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, + {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, + {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, + {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, + {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, + {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, + {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, + {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, + {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, + {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, + {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, + {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, + {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, + {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, + {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, + {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, + {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, ] [package.dependencies] @@ -452,63 +452,63 @@ files = [ [[package]] name = "coverage" -version = "7.4.4" +version = "7.5.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, - {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, - {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, - {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, - {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, - {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, - {file = 
"coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, - {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, - {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, - {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, - {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash 
= "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, - {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, - {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, - {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, - {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, + {file = "coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c"}, + {file = "coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b"}, + {file = "coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932"}, + {file = "coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3"}, + {file = "coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517"}, + {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a"}, + {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880"}, + {file = "coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58"}, + {file = "coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4"}, + {file = "coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a"}, + {file = "coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375"}, + {file = "coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb"}, + {file = "coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95"}, + {file = "coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d"}, + {file = "coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743"}, + {file = "coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1"}, + {file = "coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de"}, + {file = 
"coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff"}, + {file = "coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d"}, + {file = "coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656"}, + {file = "coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9"}, + {file = "coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64"}, + {file = "coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af"}, + {file = "coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc"}, + {file = "coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2"}, + {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1"}, + {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb"}, + {file = "coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2"}, + {file = "coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4"}, + {file = "coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475"}, + {file = "coverage-7.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9dd88fce54abbdbf4c42fb1fea0e498973d07816f24c0e27a1ecaf91883ce69e"}, + {file = "coverage-7.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a898c11dca8f8c97b467138004a30133974aacd572818c383596f8d5b2eb04a9"}, + {file = "coverage-7.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07dfdd492d645eea1bd70fb1d6febdcf47db178b0d99161d8e4eed18e7f62fe7"}, + {file = "coverage-7.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3d117890b6eee85887b1eed41eefe2e598ad6e40523d9f94c4c4b213258e4a4"}, + {file = "coverage-7.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6afd2e84e7da40fe23ca588379f815fb6dbbb1b757c883935ed11647205111cb"}, + {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9960dd1891b2ddf13a7fe45339cd59ecee3abb6b8326d8b932d0c5da208104f"}, + {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ced268e82af993d7801a9db2dbc1d2322e786c5dc76295d8e89473d46c6b84d4"}, + {file = "coverage-7.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7c211f25777746d468d76f11719e64acb40eed410d81c26cefac641975beb88"}, + {file = "coverage-7.5.0-cp38-cp38-win32.whl", hash = "sha256:262fffc1f6c1a26125d5d573e1ec379285a3723363f3bd9c83923c9593a2ac25"}, + {file = "coverage-7.5.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:eed462b4541c540d63ab57b3fc69e7d8c84d5957668854ee4e408b50e92ce26a"}, + {file = "coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1"}, + {file = "coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5"}, + {file = "coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631"}, + {file = "coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46"}, + {file = "coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e"}, + {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be"}, + {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b"}, + {file = "coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0"}, + {file = "coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7"}, + {file = "coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493"}, + {file = "coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067"}, + {file = "coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8"}, ] [package.dependencies] @@ -545,28 +545,31 @@ tools = ["crewai-tools (>=0.0.12,<0.0.13)"] [[package]] name = "curl-cffi" -version = "0.6.2" -description = "libcurl ffi bindings for Python, with impersonation support" +version = "0.6.3" +description = "libcurl ffi bindings for Python, with impersonation support." 
optional = false python-versions = ">=3.8" files = [ - {file = "curl_cffi-0.6.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:23b8a2872b160718c04b06b1f8aa4fb1a2f4f94bce7040493515e081a27cad19"}, - {file = "curl_cffi-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ad3c1cf5360810825ec4bc3da425f26ee4098878a615dab9d309a99afd883ba9"}, - {file = "curl_cffi-0.6.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d01de6ed737ad1924aaa0198195b9020c38e77ce90ea3d72b9eacf4938c7adf"}, - {file = "curl_cffi-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37e513cc149d024a2d625e202f2cc9d4423d2937343ea2e06f797d99779e62dc"}, - {file = "curl_cffi-0.6.2-cp38-abi3-win32.whl", hash = "sha256:12e829af97cbf7c1d5afef177e786f6f404ddf163b08897a1ed087cadbeb4837"}, - {file = "curl_cffi-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:3791b7a9ae4cb1298165300f2dc2d60a86779f055570ae83163fc2d8a74bf714"}, - {file = "curl_cffi-0.6.2.tar.gz", hash = "sha256:9ee519e960b5fc6e0bbf13d0ecba9ce5f6306cb929354504bf03cc30f59a8f63"}, + {file = "curl_cffi-0.6.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ac60ac08f07640bd9ee6ee44310748931a3d49a7c8e878745f1817b46ff0719d"}, + {file = "curl_cffi-0.6.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d78a609c3b984df9a14022c0b5fc59c1c643c39fc4cb9100e110f7551339a194"}, + {file = "curl_cffi-0.6.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:760b7b837c86626f2c9518c3bad42aad3b6ccb455499b648bc98e8dee9f73891"}, + {file = "curl_cffi-0.6.3-cp38-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5423c2f53f8889bcc9ce42455fc1c0c9487b480944f66aaa6ed5ce81e0fc5540"}, + {file = "curl_cffi-0.6.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b4712bc9a8a0d933aef7051eaa5448b9a1c662f25dd83967dbdd46aa3e418c0"}, + {file = "curl_cffi-0.6.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5d659acbe051805b9f9c82a7725d534b5842c1a9291159e7733e7c92782ef80b"}, + {file = "curl_cffi-0.6.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ea9b04df071368d13b02c51e87e3e51b46b924be61681fc280cb9633b42dac2c"}, + {file = "curl_cffi-0.6.3-cp38-abi3-win32.whl", hash = "sha256:0f37e3d761e37173462dbc95ae4216165dacef21f0d9ab6bb6802003516a156f"}, + {file = "curl_cffi-0.6.3-cp38-abi3-win_amd64.whl", hash = "sha256:dd36513cd46eb8f2751d45e3aa47a989421d3f3a2bcc54abab487ae38549d91b"}, + {file = "curl_cffi-0.6.3.tar.gz", hash = "sha256:0d2d07467590d66982f29a8dc9050b6a1f41a6c4bb44dcbf24108b13cf71a797"}, ] [package.dependencies] -certifi = "*" +certifi = ">=2024.2.2" cffi = ">=1.12.0" [package.extras] build = ["cibuildwheel", "wheel"] -dev = ["autoflake (==1.4)", "coverage (==6.4.1)", "cryptography (==38.0.3)", "flake8 (==6.0.0)", "flake8-bugbear (==22.7.1)", "flake8-pie (==0.15.0)", "httpx (==0.23.1)", "mypy (==0.971)", "nest-asyncio (==1.6.0)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "ruff (==0.1.14)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] -test = ["cryptography (==38.0.3)", "fastapi (==0.100.0)", "httpx (==0.23.1)", "nest-asyncio (==1.6.0)", "proxy.py (==2.4.3)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "python-multipart (==0.0.6)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] +dev = ["autoflake (==1.4)", 
"charset-normalizer (>=3.3.2,<4)", "coverage (==6.4.1)", "cryptography (==38.0.3)", "flake8 (==6.0.0)", "flake8-bugbear (==22.7.1)", "flake8-pie (==0.15.0)", "httpx (==0.23.1)", "mypy (==1.9.0)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "ruff (==0.3.3)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] +test = ["charset-normalizer (>=3.3.2,<4)", "cryptography (==38.0.3)", "fastapi (==0.100.0)", "httpx (==0.23.1)", "proxy.py (==2.4.3)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "python-multipart (==0.0.6)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] [[package]] name = "dataclasses-json" @@ -1061,41 +1064,41 @@ files = [ [[package]] name = "kuzu" -version = "0.1.0" -description = "An in-process property graph database management system built for query speed and scalability." +version = "0.3.2" +description = "Highly scalable, extremely fast, easy-to-use embeddable graph database" optional = false python-versions = "*" files = [ - {file = "kuzu-0.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:f60ca54ef3c8627665e376ffa0f044ee922f4cac2b32ed0bb2d20cd1589993be"}, - {file = "kuzu-0.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b9302b2b12aa504e22be8f71f58a44594f4609027b791db3b58f27d3ec7eea"}, - {file = "kuzu-0.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a817b73edfb79382cef15db5b9cdd57cef9a95f398811ad5d7ae6a4329ff9613"}, - {file = "kuzu-0.1.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45605768da01d9c4d9047379905b8dd83d0be30fb0bd8c49ab862c84685ea7e3"}, - {file = "kuzu-0.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e410b9c7785db065b58707ede9df2f611e15bd3da4d3f4ac8f28e45f651a030"}, - {file = "kuzu-0.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:16543b69ddb7171c723cd77df38d338d6c5607bcfaee700c71abb686293f0346"}, - {file = "kuzu-0.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b4492a904c7345428fc2493583dca1f4f5ee7cdb590006e4648b613ee8fb8b1"}, - {file = "kuzu-0.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8f459426c7a92887a0ee235b87295fdab6ab70e4e1d52647f3b31750784d699"}, - {file = "kuzu-0.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ceebcae55ea47867a6a80c16103164167c4a8fa670184168f0cf91dd82337a"}, - {file = "kuzu-0.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:c24fd6b9633081a79ceedb757a33f028ece31b9d691414db8f97e1be67739462"}, - {file = "kuzu-0.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:a0b64068983c289dee9fc624dde4b697e0307bd6c3e90ffcdc8d7d61c4fa450e"}, - {file = "kuzu-0.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9bea48a3db41a75413b531caa8fd7eb0bbf67357181cebac806d9c188550d1c"}, - {file = "kuzu-0.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ac9171197c0d6ff8b5adc30043c0a3f012d2fc17529c7afcf9c5659db4cf69"}, - {file = "kuzu-0.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec0557cda5735ba03bf28fe766180f3fa92319e3e1611e744248f4ed28357423"}, - {file = "kuzu-0.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:229e1902d1c82faef0fc07dc617ec82d927131dd519a4908f61cfce0f452ba74"}, - {file = "kuzu-0.1.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = 
"sha256:07b1eb930afe5efd392fc0ad5a58ea27216a464e95231661aacbd0013294cee0"}, - {file = "kuzu-0.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c954af85a5be173741322e239a4c1d8a274d0ff2d0a006386dba587aa3e64c8"}, - {file = "kuzu-0.1.0-cp37-cp37m-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4d761b610ba83448cc6ba51ddd376489d525dddcf8d74492de15cd472ecffd7c"}, - {file = "kuzu-0.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:cdf8dd2277e595719a6706e6340912323dc4db067c8169e24949f4746d467a96"}, - {file = "kuzu-0.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:bd0ab78ba72c8befb275d6ad55f19e738f9387f7540fa36faf413ecb76bb8563"}, - {file = "kuzu-0.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6b1fe702f671063c626bd1390cf20910b49893192ae77b303acd6b9f3f62f661"}, - {file = "kuzu-0.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b488f83e5fc4a1bf6a15ed70712e4c2b9c6296f25c4c2a9f8e8015e81f831151"}, - {file = "kuzu-0.1.0-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61ab2d6aa98bee1bf7c3f6893a1706a408531846d1de5b9624eb490ca2117b2c"}, - {file = "kuzu-0.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:80000e12a0703f5062ab3560e701a6a1b2e10992c31de6912f9e5291509b1659"}, - {file = "kuzu-0.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:967a99fb3cdea73c3bfa86c45a78ed896468318f6061e42c352e3df96b370e0f"}, - {file = "kuzu-0.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:65d72f536ed91aa114dae8400e5cdd22ba05540782de39462a7da550ed3d438a"}, - {file = "kuzu-0.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74a5eb43e9ae5b1e4ca3cf61cd845c3e300dc737284fda384441af01ad05bd54"}, - {file = "kuzu-0.1.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e78bd924cdcace890e7ed25917d8221b941f239a05391c5d2a08c70b516096e"}, - {file = "kuzu-0.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:37432b996f706354b6c469b6ea1bdd9d7fba4d61256f041e723fc8eefbedcadd"}, - {file = "kuzu-0.1.0.tar.gz", hash = "sha256:12883c9f7d1621c8f4ab50b85ff89d011aef45f7f1328a2c1c6417d5e1456cae"}, + {file = "kuzu-0.3.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:11734936751d6decfd4c1438d09d1f992744b132f4ac52245e14f2cdbcddb762"}, + {file = "kuzu-0.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7c43c0182bac75a03ec046410aeb4eb0dcc85d1d87843d271507c0c53bd51e72"}, + {file = "kuzu-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2ab1e20696b96a8ea3f42e721800f5d41d61777f38bbe8c4618b9cce090da9f"}, + {file = "kuzu-0.3.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:963d194f8987780aab305cfa5f4401c1a0adc2675544a4adf3d90a7141cadfb3"}, + {file = "kuzu-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:6ab9d167931ff67379c4ac846c50758a88ab7e3d0ce681008f472ab0bf732dbe"}, + {file = "kuzu-0.3.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:ddb720d8398d25318a9fea7421eecc0ac9fd97dacaf0be3072edea8171d4319d"}, + {file = "kuzu-0.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49d1f12bdbc9175f42f0e860b420f2fa99fac578a4691afb0651fb1f7c92c720"}, + {file = "kuzu-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26e7907cdc4849dafd7d34852742640dc12031093f2a25de91764340312cd0b2"}, + {file = "kuzu-0.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93a84cba66f810af87f1cb7743aefc66867f6febd7beba2c02e5081c439dd1c8"}, + {file = 
"kuzu-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:6ad68201d28e0bcf53e819640dfb8454219b92a52f053fd6dc3a09bcd2db999e"}, + {file = "kuzu-0.3.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c348bdf8fe631d8e44ee7923d138ff6d6bb778bddcf40e1a94a3323f2763b015"}, + {file = "kuzu-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:260f750b5de8b8e7d7adbf06f76ca1b71263a41922fcdaad8e44237df60c7ef9"}, + {file = "kuzu-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c01267865b061c90f8192c5e016f57d01781f33d12f43349f1b258e82d779392"}, + {file = "kuzu-0.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83e86fb82701b8396e9a2dc8340b3d349880dfa3214635cfbb06a4d3a9d955fd"}, + {file = "kuzu-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0c96a41b33e43a8df3c189af86d113777b168c3cc236401c2e2621a67687ee6f"}, + {file = "kuzu-0.3.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:553b86d730b5fa2e5e7825d3b8b8cad01b0d17942e23e32f56e0301ff89fa13d"}, + {file = "kuzu-0.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:188b7567da9046942c2b20018edb82627513f56da0e7f50321e2ee9534881999"}, + {file = "kuzu-0.3.2-cp37-cp37m-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:354242776e3c2a31c8aab58de6d5c4ca8e2cef9aaf50d50ef36e3b20607b8fbb"}, + {file = "kuzu-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8dfe9439840c267186f65bbf00085fa2793fe14f9b93b8d5008c90cda71edbbc"}, + {file = "kuzu-0.3.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:4c053b225ddbf6e72cfb2844efb71938476a07e6e3c2e83640e32852d06a5786"}, + {file = "kuzu-0.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:530bc730ed8d7118e605f970e593ec507c7589da86a2bd752417f44385806df4"}, + {file = "kuzu-0.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203a5ccb639ca5decaf7bf82e292e84e61aeb6230d1d69a78838a4264a1fe8ff"}, + {file = "kuzu-0.3.2-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:166af0e4dd5368a3f2577d0fe4b09177918c741aee6ebf8e3835ab50e8f6e52a"}, + {file = "kuzu-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:0d8ef89314e697582af46857a6862a7e3aac7bb669dd411b0bb341c1cac81e86"}, + {file = "kuzu-0.3.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5c417f127c79e574c6964b01404eb4130f245f2f389b238cadc33df0701e754f"}, + {file = "kuzu-0.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:893a51bb1b64022c5df07ad7cc9372f340bbc45fbbcae714c720db82d6848667"}, + {file = "kuzu-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6552454259907096388f65e7f5617589529bc57daba50b28f5e2010eb86b43d"}, + {file = "kuzu-0.3.2-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e08354dc8e729b4114fc14091fe68d3bdd12b676b60fc94b745c3c7eec272a7"}, + {file = "kuzu-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:513328bf8f0a1c4083e9d580e5a20b2a234ca9d82be737b750414d6109dbfc0b"}, + {file = "kuzu-0.3.2.tar.gz", hash = "sha256:f08366e0102ac3be6aeb2335563257e0c1a4977fecd20c5e124d2aeb8b818af6"}, ] [[package]] @@ -1141,19 +1144,19 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] [[package]] name = "langchain-community" -version = "0.0.33" +version = "0.0.34" description = "Community contributed LangChain integrations." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_community-0.0.33-py3-none-any.whl", hash = "sha256:830f0d5f4ff9638b99ca01820c26abfa4b65fa705ef89b5ce55ac9aa3a7d83af"}, - {file = "langchain_community-0.0.33.tar.gz", hash = "sha256:bb56dbc1ef11ca09f258468e11368781adda9219e144073e30cda69496d342b2"}, + {file = "langchain_community-0.0.34-py3-none-any.whl", hash = "sha256:bc13b21a44bbfca01bff8b35c10a26d71485b57c1d284f499b577ba6e1a5d84a"}, + {file = "langchain_community-0.0.34.tar.gz", hash = "sha256:96e9a807d9b4777820df5a970996f6bf3ad5632137bf0f4d863bd832bdeb2b0f"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" dataclasses-json = ">=0.5.7,<0.7" -langchain-core = ">=0.1.43,<0.2.0" +langchain-core = ">=0.1.45,<0.2.0" langsmith = ">=0.1.0,<0.2.0" numpy = ">=1,<2" PyYAML = ">=5.3" @@ -1167,13 +1170,13 @@ extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15. [[package]] name = "langchain-core" -version = "0.1.44" +version = "0.1.46" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_core-0.1.44-py3-none-any.whl", hash = "sha256:d8772dccef95fc97bfa2dcd19412e620ebe14def1f0e218374971f6e30a46a49"}, - {file = "langchain_core-0.1.44.tar.gz", hash = "sha256:e313975d9ae2926342e6f2ad760338d31f18b1223e9b8b4dc408daeeade46a83"}, + {file = "langchain_core-0.1.46-py3-none-any.whl", hash = "sha256:1c0befcd2665dd4aa153318aa9bf729071644b4c179e491769b8e583b4bf7441"}, + {file = "langchain_core-0.1.46.tar.gz", hash = "sha256:17c416349f5c7a9808e70e3725749a3a2df5088f1ecca045c883871aa95f9c9e"}, ] [package.dependencies] @@ -1256,13 +1259,13 @@ types-requests = ">=2.31.0.2,<3.0.0.0" [[package]] name = "langsmith" -version = "0.1.49" +version = "0.1.51" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.49-py3-none-any.whl", hash = "sha256:cf0db7474c0dfb22015c22bf97f62e850898c3c6af9564dd111c2df225acc1c8"}, - {file = "langsmith-0.1.49.tar.gz", hash = "sha256:5aee8537763f9d62b3368d79d7bfef881e2bfaa28639011d8d7328770cbd6419"}, + {file = "langsmith-0.1.51-py3-none-any.whl", hash = "sha256:1e7363a3f472ecf02a1d91f6dbacde25519554b98c490be71716fcffaab0ca6b"}, + {file = "langsmith-0.1.51.tar.gz", hash = "sha256:b99b40a8c00e66174540865caa61412622fa1dc4f02602965364919c90528f97"}, ] [package.dependencies] @@ -1272,19 +1275,19 @@ requests = ">=2,<3" [[package]] name = "llama-index" -version = "0.10.30" +version = "0.10.32" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index-0.10.30-py3-none-any.whl", hash = "sha256:7083014be8f74c6037cc33d629985a7505ee3c89b972503f8dd7d4ac7c45a4f4"}, - {file = "llama_index-0.10.30.tar.gz", hash = "sha256:58fab1047c75e39803e8c394fab7d5e81ce97f27281bf7cba6bea40a7328562e"}, + {file = "llama_index-0.10.32-py3-none-any.whl", hash = "sha256:2a344f30120c3bb9f6c24c14056e3098288e7e2607ac18c5cabe2f1ec01f370c"}, + {file = "llama_index-0.10.32.tar.gz", hash = "sha256:857f2e40fdf58d6be27826706f40028cdc8bdcb4b208295debb408fef89ae5ac"}, ] [package.dependencies] llama-index-agent-openai = ">=0.1.4,<0.3.0" llama-index-cli = ">=0.1.2,<0.2.0" -llama-index-core = ">=0.10.30,<0.11.0" +llama-index-core = ">=0.10.32,<0.11.0" llama-index-embeddings-openai = ">=0.1.5,<0.2.0" llama-index-indices-managed-llama-cloud = ">=0.1.2,<0.2.0" llama-index-legacy = ">=0.9.48,<0.10.0" @@ -1297,17 +1300,17 @@ llama-index-readers-llama-parse = ">=0.1.2,<0.2.0" [[package]] name = "llama-index-agent-openai" -version = "0.2.2" +version = "0.2.3" description = "llama-index agent openai integration" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_agent_openai-0.2.2-py3-none-any.whl", hash = "sha256:fa8cbc2c7be5a465848f8d5b432db01c55f07dfa06357edb7fb77fb17d534d1e"}, - {file = "llama_index_agent_openai-0.2.2.tar.gz", hash = "sha256:12063dd932c74015796f973986cc52d783f51fda38e4ead72a56d0fd195925ee"}, + {file = "llama_index_agent_openai-0.2.3-py3-none-any.whl", hash = "sha256:3782b24dd611364e391672dadc8308efd58d731a097c34a40e29f28c3abc5034"}, + {file = "llama_index_agent_openai-0.2.3.tar.gz", hash = "sha256:c899d90b32036656a8ef86d0f0378d4168e00eb2d75a10901eab58ba5b2656a4"}, ] [package.dependencies] -llama-index-core = ">=0.10.1,<0.11.0" +llama-index-core = ">=0.10.30,<0.11.0" llama-index-llms-openai = ">=0.1.5,<0.2.0" openai = ">=1.14.0" @@ -1329,13 +1332,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0" [[package]] name = "llama-index-core" -version = "0.10.30" +version = "0.10.32" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_core-0.10.30-py3-none-any.whl", hash = "sha256:2f291ce2975f9dbf0ea87d684d3d8122ce216265f468f32baa2cf4ecfb34ed2a"}, - {file = "llama_index_core-0.10.30.tar.gz", hash = "sha256:bed3f683606a0b0eb0839677c935a4b57b7bae509a95d380e51c6225630660e0"}, + {file = "llama_index_core-0.10.32-py3-none-any.whl", hash = "sha256:215f7389dadb78f2df13c20312a3e1e03c41f23e3063907469c4bae67bfd458c"}, + {file = "llama_index_core-0.10.32.tar.gz", hash = "sha256:0078c06d9143390e14c86a40e69716c88c7828533341559edd15e52249ede65a"}, ] [package.dependencies] @@ -1373,31 +1376,16 @@ query-tools = ["guidance (>=0.0.64,<0.0.65)", 
"jsonpath-ng (>=1.6.0,<2.0.0)", "l [[package]] name = "llama-index-embeddings-openai" -version = "0.1.7" +version = "0.1.9" description = "llama-index embeddings openai integration" optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "llama_index_embeddings_openai-0.1.7-py3-none-any.whl", hash = "sha256:6023925ed1487b0688323d21711efbf8880e82ed3b87ef413255c3dc63a2f2fe"}, - {file = "llama_index_embeddings_openai-0.1.7.tar.gz", hash = "sha256:c71cc9820680c4cedfc9845dc87b94f6851d1ccce1e486fc91298f8fa8d9f27d"}, -] - -[package.dependencies] -llama-index-core = ">=0.10.1,<0.11.0" - -[[package]] -name = "llama-index-graph-stores-kuzu" -version = "0.1.2" -description = "llama-index graph stores kuzu integration" -optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_graph_stores_kuzu-0.1.2-py3-none-any.whl", hash = "sha256:eba188bc6afdc246cfd15114359fb8d2db4a34e88e3e96385a383d4fbc7a1dcd"}, - {file = "llama_index_graph_stores_kuzu-0.1.2.tar.gz", hash = "sha256:0458860bde83d28cdd760a0cb5502cb0b191cae5896d814c1c420dcb99dbb8fd"}, + {file = "llama_index_embeddings_openai-0.1.9-py3-none-any.whl", hash = "sha256:fbd16d6197b91f4dbdc6d0707e573cc224ac2b0a48d5b370c6232dd8a2282473"}, + {file = "llama_index_embeddings_openai-0.1.9.tar.gz", hash = "sha256:0fd292b2f9a0ad4534a790d6374726bc885853188087eb018167dcf239643924"}, ] [package.dependencies] -kuzu = ">=0.1.0,<0.2.0" llama-index-core = ">=0.10.1,<0.11.0" [[package]] @@ -1456,13 +1444,13 @@ query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "l [[package]] name = "llama-index-llms-openai" -version = "0.1.15" +version = "0.1.16" description = "llama-index llms openai integration" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_llms_openai-0.1.15-py3-none-any.whl", hash = "sha256:401ba9db1549e4287b73749dee57b11db1e0ffa814bb1464475aea1ff84442c7"}, - {file = "llama_index_llms_openai-0.1.15.tar.gz", hash = "sha256:6bdbf307b1d43a9a7c2a52f72ba7db61cb96d904f99e7ea5d889dd7818f10814"}, + {file = "llama_index_llms_openai-0.1.16-py3-none-any.whl", hash = "sha256:4a9c0fe969302731907c8fa31631812397637e114a44ebbad11fd6c59def9315"}, + {file = "llama_index_llms_openai-0.1.16.tar.gz", hash = "sha256:313bbc17c241992430a6bf686a1b1edc4276c8256ad6b0550aa1bea1e0fed1a6"}, ] [package.dependencies] @@ -1485,13 +1473,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0" [[package]] name = "llama-index-program-openai" -version = "0.1.5" +version = "0.1.6" description = "llama-index program openai integration" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_program_openai-0.1.5-py3-none-any.whl", hash = "sha256:20b6efa706ac73e4dc5086900fea1ffcb1eb0787c8a6f081669d37da7235aee0"}, - {file = "llama_index_program_openai-0.1.5.tar.gz", hash = "sha256:c33aa2d2876ad0ff1f9a2a755d4e7d4917240847d0174e7b2d0b8474499bb700"}, + {file = "llama_index_program_openai-0.1.6-py3-none-any.whl", hash = "sha256:4660b338503537c5edca1e0dab606af6ce372b4f1b597e2833c6b602447c5d8d"}, + {file = "llama_index_program_openai-0.1.6.tar.gz", hash = "sha256:c6a4980c5ea826088b28b4dee3367edb20221e6d05eb0e05019049190131d772"}, ] [package.dependencies] @@ -1552,13 +1540,13 @@ llama-parse = ">=0.4.0,<0.5.0" [[package]] name = "llama-parse" -version = "0.4.1" +version = "0.4.2" description = "Parse files into RAG-Optimized formats." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_parse-0.4.1-py3-none-any.whl", hash = "sha256:2c08962b66791c61fc360ae2042f953729c7b8decc3590d01fea5a98ca1f6676"}, - {file = "llama_parse-0.4.1.tar.gz", hash = "sha256:d723af84d6a1fc99eb431915d21865d20b76d8a246dbaa124d1f96c956a644f7"}, + {file = "llama_parse-0.4.2-py3-none-any.whl", hash = "sha256:5ce0390141f216dcd88c1123fea7f2a4f561d177f791a97217a3db3509dec4ff"}, + {file = "llama_parse-0.4.2.tar.gz", hash = "sha256:fa04c09730b102155f6505de9cf91998c86d334581f0f12597c5eb47ca5db859"}, ] [package.dependencies] @@ -1581,16 +1569,17 @@ pydantic = ">=1.10" [[package]] name = "lunary" -version = "1.0.3" +version = "1.0.6" description = "Observability, analytics and evaluations for AI agents and chatbots." optional = false python-versions = "<4.0.0,>=3.8.1" files = [ - {file = "lunary-1.0.3-py3-none-any.whl", hash = "sha256:f3ff4b85c589576541ae517bab46defed471853deab28fb85a0c90192d823984"}, - {file = "lunary-1.0.3.tar.gz", hash = "sha256:a760637b349ff0aa33d81e173385f209e165a988da16b50713d86242a8d94696"}, + {file = "lunary-1.0.6-py3-none-any.whl", hash = "sha256:790cbba537d883cfdefd6766f2dcdbe6208947f41362681f9816d5949a866dd5"}, + {file = "lunary-1.0.6.tar.gz", hash = "sha256:970906e86b3e0bddd439bf9549c01a8295a1dde792dc999358b5307ad4b01f80"}, ] [package.dependencies] +aiohttp = ">=3.9.5,<4.0.0" chevron = ">=0.14.0,<0.15.0" opentelemetry-api = ">=1.21.0,<2.0.0" opentelemetry-sdk = ">=1.21.0,<2.0.0" @@ -1766,38 +1755,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [package.dependencies] @@ -1923,13 +1912,13 @@ files = [ [[package]] name = "openai" -version = "1.23.1" +version = "1.23.6" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.23.1-py3-none-any.whl", hash = "sha256:7941c1bc6fcdb1b6b889dfcfabff775ca52558a79d57dd1b9e15b463de1b3a4c"}, - {file = "openai-1.23.1.tar.gz", hash = "sha256:6df937e2a1ad64494951ea3614f5516db4d67c3fcc0b751b8e5edf1bc57e2d3d"}, + {file = "openai-1.23.6-py3-none-any.whl", hash = "sha256:f406c76ba279d16b9aca5a89cee0d968488e39f671f4dc6f0d690ac3c6f6fca1"}, + {file = "openai-1.23.6.tar.gz", hash = "sha256:612de2d54cf580920a1156273f84aada6b3dca26d048f62eb5364a4314d7f449"}, ] [package.dependencies] @@ -2277,28 +2266,29 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, + {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -2349,18 +2339,18 @@ files = [ [[package]] name = "pydantic" -version = "2.7.0" +version = "2.7.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"}, - {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"}, + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.1" +pydantic-core = "2.18.2" typing-extensions = ">=4.6.1" [package.extras] @@ -2368,90 +2358,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.1" +version = "2.18.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"}, - {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"}, - {file = 
"pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"}, - {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"}, - {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"}, - {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"}, - {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"}, - {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"}, - {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"}, - {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"}, - {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"}, - {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"}, - {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"}, - {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = "sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"}, - {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"}, - {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"}, - {file = 
"pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"}, - {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"}, - {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"}, - {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"}, - {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = "sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"}, - {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"}, - {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"}, - {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"}, - {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"}, - {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"}, - {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"}, - 
{file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"}, - {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"}, - {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"}, - {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"}, - {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"}, - {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = "sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"}, - {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = "sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"}, - 
{file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"}, - {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = 
"pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = 
"sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, ] [package.dependencies] @@ -3328,4 +3318,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", 
"more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.10,<=3.13" -content-hash = "a9a8a5a94aa310dc866ec41a0fba6c0ff748d27d71cd38d5b5012d9f247a2357" +content-hash = "59e108049a89da115a0689c7107cdce57a5af78d69777f1b9ceefe00829a0642" diff --git a/pyproject.toml b/pyproject.toml index 24be4bbb..ed643697 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ langchain-experimental = "^0.0.57" python-dotenv = "^1.0.0" lunary = "^1.0.3" langchainhub = "^0.1.15" -llama-index-graph-stores-kuzu = "^0.1.2" +kuzu = "^0.3.0" [tool.poetry.group.dev.dependencies] black = "^24.2.0" From 068f5f44ac71ab4b44ee8c206c9e69a656fe89b5 Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 26 Apr 2024 12:34:48 +0400 Subject: [PATCH 09/20] WIP: research agent orchestrator --- examples/research_agent/research_agent.py | 84 ++++++++++ motleycrew/storage/__init__.py | 2 +- motleycrew/storage/kuzu_graph_store.py | 185 ++++++++++++---------- 3 files changed, 183 insertions(+), 88 deletions(-) create mode 100644 examples/research_agent/research_agent.py diff --git a/examples/research_agent/research_agent.py b/examples/research_agent/research_agent.py new file mode 100644 index 00000000..491d9846 --- /dev/null +++ b/examples/research_agent/research_agent.py @@ -0,0 +1,84 @@ +import logging +import sys +import kuzu + +from langchain.prompts import PromptTemplate + +from motleycrew.storage import MotleyKuzuGraphStore +from motleycrew.tool.llm_tool import LLMTool + +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + + +QUESTION_PRIORITIZATION_TEMPLATE = PromptTemplate( + template=( + "You are provided with the following list of questions:" + " {unanswered_questions} \n" + " Your task is to choose one question from the above list" + " that is the most pertinent to the following query:\n" + " '{original_question}' \n" + " Respond with one question out of the provided list of questions." + " Return the questions as it is without any edits." + " Format your response like:\n" + " #. question" + ), + input_variables=["unanswered_questions", "original_question"], +) + + +class KnowledgeGainingOrchestrator: + def __init__(self, db_path: str): + self.db = kuzu.Database(db_path) + self.storage = MotleyKuzuGraphStore( + self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} + ) + + self.question_prioritization_tool = LLMTool( + name="question_prioritization_tool", + description="find the most important question", + prompt=QUESTION_PRIORITIZATION_TEMPLATE, + ) + self.question_generation_tool = None + + def get_unanswered_questions(self, only_without_children: bool = False) -> list[dict]: + if only_without_children: + query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( + self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name + ) + else: + query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) + + query_result = self.storage.run_query(query) + return [row[0] for row in query_result] # flatten + + def __call__(self, query: str, max_iter: int): + self.storage.create_entity({"question": query}) + + for iter_n in range(max_iter): + logging.info("====== Iteration %s of %s ======", iter_n, max_iter) + + unanswered_questions = self.get_unanswered_questions(only_without_children=True) + logging.info("Loaded unanswered questions: %s", unanswered_questions) + + tool_input = "\n".join(f"{i}. 
{question}" for i, question in enumerate(unanswered_questions)) + most_pertinent_question_raw = self.question_prioritization_tool.invoke(tool_input) + logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) + + i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) + assert i < len(unanswered_questions) + + most_pertinent_question = unanswered_questions[i] + assert most_pertinent_question_text.strip() == most_pertinent_question["question"].strip() + + logging.info("Generating new questions") + + +if __name__ == "__main__": + from pathlib import Path + import shutil + + here = Path(__file__).parent + db_path = here / "research_db" + shutil.rmtree(db_path, ignore_errors=True) + + orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path)) diff --git a/motleycrew/storage/__init__.py b/motleycrew/storage/__init__.py index 096759ad..1f544aec 100644 --- a/motleycrew/storage/__init__.py +++ b/motleycrew/storage/__init__.py @@ -1 +1 @@ -from .kuzu_graph_store import MotleyQuestionGraphStore +from .kuzu_graph_store import MotleyKuzuGraphStore diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py index e930c281..d2831b67 100644 --- a/motleycrew/storage/kuzu_graph_store.py +++ b/motleycrew/storage/kuzu_graph_store.py @@ -5,21 +5,22 @@ from typing import Any, Dict, List, Optional +import json import kuzu -class MotleyQuestionGraphStore: - IS_SUBQUESTION_PREDICATE = "IS_SUBQUESTION" - +class MotleyKuzuGraphStore: def __init__( self, database: Any, - node_table_name: str = "question", + node_table_schema: dict[str, str], + node_table_name: str = "entity", rel_table_name: str = "links", **kwargs: Any, ) -> None: self.database = database self.connection = kuzu.Connection(database) + self.node_table_schema = node_table_schema self.node_table_name = node_table_name self.rel_table_name = rel_table_name self.init_schema() @@ -28,10 +29,13 @@ def init_schema(self) -> None: """Initialize schema if the tables do not exist.""" node_tables = self.connection._get_node_table_names() if self.node_table_name not in node_tables: - self.connection.execute( - "CREATE NODE TABLE %s (ID SERIAL, question STRING, answer STRING, context STRING[], PRIMARY KEY(ID))" - % self.node_table_name + node_table_schema_expr = ", ".join( + ["id SERIAL"] + + [f"{name} {datatype}" for name, datatype in self.node_table_schema.items()] + + ["PRIMARY KEY(id)"] ) + self.connection.execute("CREATE NODE TABLE {} ({})".format(self.node_table_name, node_table_schema_expr)) + rel_tables = self.connection._get_rel_table_names() rel_tables = [rel_table["name"] for rel_table in rel_tables] if self.rel_table_name not in rel_tables: @@ -45,121 +49,117 @@ def init_schema(self) -> None: def client(self) -> Any: return self.connection - def check_question_exists(self, question_id: int) -> bool: + def check_entity_exists(self, entity_id: int) -> bool: is_exists_result = self.connection.execute( - "MATCH (n:%s) WHERE n.ID = $question_id RETURN n.ID" % self.node_table_name, - {"question_id": question_id}, + "MATCH (n:%s) WHERE n.id = $entity_id RETURN n.id" % self.node_table_name, + {"entity_id": entity_id}, ) return is_exists_result.has_next() - def get_question(self, question_id: int) -> Optional[dict]: + def get_entity(self, entity_id: int) -> Optional[dict]: query = """ MATCH (n1:%s) - WHERE n1.ID = $question_id + WHERE n1.id = $entity_id RETURN n1; """ prepared_statement = self.connection.prepare(query % self.node_table_name) - query_result = 
self.connection.execute(prepared_statement, {"question_id": question_id}) + query_result = self.connection.execute(prepared_statement, {"entity_id": entity_id}) if query_result.has_next(): row = query_result.get_next() - return row[0] - - def get_subquestions(self, question_id: int) -> List[int]: - query = """ - MATCH (n1:%s)-[r:%s]->(n2:%s) - WHERE n1.ID = $question_id - AND r.predicate = $is_subquestion_predicate - RETURN n2.ID; - """ - prepared_statement = self.connection.prepare( - query % (self.node_table_name, self.rel_table_name, self.node_table_name) - ) - query_result = self.connection.execute( - prepared_statement, - { - "question_id": question_id, - "is_subquestion_predicate": MotleyQuestionGraphStore.IS_SUBQUESTION_PREDICATE, - }, - ) - retval = [] - while query_result.has_next(): - row = query_result.get_next() - retval.append(row[0]) - return retval + item = row[0] + return item - def create_question(self, question: str) -> int: + def create_entity(self, entity: dict) -> int: + """Create a new entity and return its id""" create_result = self.connection.execute( - "CREATE (n:%s {question: $question}) " "RETURN n.ID" % self.node_table_name, - {"question": question}, + "CREATE (n:{} $entity) RETURN n.id".format(self.node_table_name), + {"entity": entity}, ) assert create_result.has_next() return create_result.get_next()[0] - def create_subquestion(self, question_id: int, subquestion: str) -> int: - def create_subquestion_rel(connection: Any, question_id: int, subquestion_id: int) -> None: - connection.execute( - ( - "MATCH (n1:{}), (n2:{}) WHERE n1.ID = $question_id AND n2.ID = $subquestion_id " - "CREATE (n1)-[r:{} {{predicate: $is_subquestion_predicate}}]->(n2)" - ).format(self.node_table_name, self.node_table_name, self.rel_table_name), - { - "question_id": question_id, - "subquestion_id": subquestion_id, - "is_subquestion_predicate": MotleyQuestionGraphStore.IS_SUBQUESTION_PREDICATE, - }, - ) - - if not self.check_question_exists(question_id): - raise Exception(f"No question with id {question_id}") - - subquestion_id = self.create_question(subquestion) - create_subquestion_rel(self.connection, question_id=question_id, subquestion_id=subquestion_id) - return subquestion_id + def create_rel(self, from_id: int, to_id: int, predicate: str) -> None: + self.connection.execute( + ( + "MATCH (n1:{}), (n2:{}) WHERE n1.id = $from_id AND n2.id = $to_id " + "CREATE (n1)-[r:{} {{predicate: $predicate}}]->(n2)" + ).format(self.node_table_name, self.node_table_name, self.rel_table_name), + { + "from_id": from_id, + "to_id": to_id, + "predicate": predicate, + }, + ) - def delete_question(self, question_id: int) -> None: - """Deletes question and its relations.""" + def delete_entity(self, entity_id: int) -> None: + """Delete a given entity and its relations""" - def delete_rels(connection: Any, question_id: int) -> None: + def delete_rels(connection: Any, entity_id: int) -> None: + # Undirected relation removal is not supported for some reason connection.execute( - "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.ID = $question_id DELETE r".format( - self.node_table_name, self.rel_table_name, self.node_table_name + "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $entity_id DELETE r;" + "MATCH (n1:{})<-[r:{}]-(n2:{}) WHERE n1.id = $entity_id DELETE r".format( + self.node_table_name, + self.rel_table_name, + self.node_table_name, + self.node_table_name, + self.rel_table_name, + self.node_table_name, ), - {"question_id": question_id}, + {"entity_id": entity_id}, ) connection.execute( - "MATCH 
(n1:{})<-[r:{}]-(n2:{}) WHERE n1.ID = $question_id DELETE r".format( + "MATCH (n1:{})<-[r:{}]-(n2:{}) WHERE n1.id = $entity_id DELETE r".format( self.node_table_name, self.rel_table_name, self.node_table_name ), - {"question_id": question_id}, + {"entity_id": entity_id}, ) - def delete_question(connection: Any, question_id: int) -> None: + def delete_entity(connection: Any, entity_id: int) -> None: connection.execute( - "MATCH (n:%s) WHERE n.ID = $question_id DELETE n" % self.node_table_name, - {"question_id": question_id}, + "MATCH (n:%s) WHERE n.id = $entity_id DELETE n" % self.node_table_name, + {"entity_id": entity_id}, ) - delete_rels(self.connection, question_id) - delete_question(self.connection, question_id) + delete_rels(self.connection, entity_id) + delete_entity(self.connection, entity_id) + + def set_property(self, entity_id: int, property_name: str, property_value: Any): + query = """ + MATCH (n1:{}) + WHERE n1.id = $entity_id + SET n1.{} = $property_value; + """ + prepared_statement = self.connection.prepare(query.format(self.node_table_name, property_name)) + self.connection.execute(prepared_statement, {"entity_id": entity_id, "property_value": property_value}) + + def run_query(self, query: str, parameters: Optional[dict] = None) -> list[list]: + """Run a Cypher query and return the results""" + query_result = self.connection.execute(query=query, parameters=parameters) + retval = [] + while query_result.has_next(): + retval.append(query_result.get_next()) + return retval @classmethod def from_persist_dir( cls, persist_dir: str, + node_table_schema: dict[str, str], node_table_name: str = "entity", rel_table_name: str = "links", - ) -> "MotleyQuestionGraphStore": + ) -> "MotleyKuzuGraphStore": """Load from persist dir.""" try: import kuzu except ImportError: raise ImportError("Please install kuzu: pip install kuzu") database = kuzu.Database(persist_dir) - return cls(database, node_table_name, rel_table_name) + return cls(database, node_table_schema, node_table_name, rel_table_name) @classmethod - def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyQuestionGraphStore": + def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore": """Initialize graph store from configuration dictionary. 
Args: @@ -173,25 +173,36 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyQuestionGraphStore": if __name__ == "__main__": from pathlib import Path + import shutil here = Path(__file__).parent db_path = here / "test1" + shutil.rmtree(db_path, ignore_errors=True) db = kuzu.Database(str(db_path)) - graph_store = MotleyQuestionGraphStore(db) + graph_store = MotleyKuzuGraphStore( + db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} + ) + + IS_SUBQUESTION_PREDICATE = "is_subquestion" + + q1_id = graph_store.create_entity({"question": "q1"}) + assert graph_store.get_entity(q1_id)["question"] == "q1" - q1_id = graph_store.create_question("q1") - assert graph_store.get_question(q1_id)["question"] == "q1" + q2_id = graph_store.create_entity({"question": "q2"}) + q3_id = graph_store.create_entity({"question": "q3"}) + q4_id = graph_store.create_entity({"question": "q4"}) + graph_store.create_rel(q1_id, q2_id, IS_SUBQUESTION_PREDICATE) + graph_store.create_rel(q1_id, q3_id, IS_SUBQUESTION_PREDICATE) + graph_store.create_rel(q3_id, q4_id, IS_SUBQUESTION_PREDICATE) - q2_id = graph_store.create_subquestion(q1_id, "q2") - q3_id = graph_store.create_subquestion(q1_id, "q3") - q4_id = graph_store.create_subquestion(q3_id, "q4") + graph_store.delete_entity(q4_id) + assert graph_store.get_entity(q4_id) is None - assert set(graph_store.get_subquestions(q1_id)) == {q2_id, q3_id} - assert set(graph_store.get_subquestions(q3_id)) == {q4_id} + graph_store.set_property(q2_id, property_name="answer", property_value="a2") + graph_store.set_property(q3_id, property_name="", property_value=["c3_1", "c3_2"]) - graph_store.delete_question(q4_id) - assert graph_store.get_question(q4_id) is None - assert not graph_store.get_subquestions(q3_id) + assert graph_store.get_entity(q2_id)["answer"] == "a2" + assert graph_store.get_entity(q3_id)["context"] == ["c3_1", "c3_2"] print(f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest") print("MATCH (A)-[r]->(B) RETURN *;") From 75071a493e10ca6edc6c11b44325f36887e10417 Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 26 Apr 2024 13:27:53 +0400 Subject: [PATCH 10/20] Kuzu graph store entity creation fix --- motleycrew/storage/kuzu_graph_store.py | 32 +++++++++++++++----------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py index d2831b67..96b9f48f 100644 --- a/motleycrew/storage/kuzu_graph_store.py +++ b/motleycrew/storage/kuzu_graph_store.py @@ -5,7 +5,6 @@ from typing import Any, Dict, List, Optional -import json import kuzu @@ -70,11 +69,23 @@ def get_entity(self, entity_id: int) -> Optional[dict]: item = row[0] return item + @staticmethod + def _dict_to_cypher_mapping_with_parameters(entity: dict) -> tuple[str, dict]: + parameters = {} + + cypher_mapping = "{" + for key, value in entity.items(): + cypher_mapping += f"{key}: ${key}, " + parameters[key] = value + + cypher_mapping = cypher_mapping.rstrip(", ") + "}" + return cypher_mapping, parameters + def create_entity(self, entity: dict) -> int: """Create a new entity and return its id""" + cypher_mapping, parameters = MotleyKuzuGraphStore._dict_to_cypher_mapping_with_parameters(entity) create_result = self.connection.execute( - "CREATE (n:{} $entity) RETURN n.id".format(self.node_table_name), - {"entity": entity}, + "CREATE (n:{} {}) RETURN n.id".format(self.node_table_name, cypher_mapping), parameters=parameters ) assert create_result.has_next() return 
create_result.get_next()[0]
@@ -98,14 +109,8 @@ def delete_entity(self, entity_id: int) -> None:
         def delete_rels(connection: Any, entity_id: int) -> None:
             # Undirected relation removal is not supported for some reason
             connection.execute(
-                "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $entity_id DELETE r;"
-                "MATCH (n1:{})<-[r:{}]-(n2:{}) WHERE n1.id = $entity_id DELETE r".format(
-                    self.node_table_name,
-                    self.rel_table_name,
-                    self.node_table_name,
-                    self.node_table_name,
-                    self.rel_table_name,
-                    self.node_table_name,
+                "MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $entity_id DELETE r".format(
+                    self.node_table_name, self.rel_table_name, self.node_table_name
                 ),
                 {"entity_id": entity_id},
             )
@@ -174,6 +179,7 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore":
 if __name__ == "__main__":
     from pathlib import Path
     import shutil
+    import json
 
     here = Path(__file__).parent
     db_path = here / "test1"
@@ -199,10 +205,10 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore":
     assert graph_store.get_entity(q4_id) is None
 
     graph_store.set_property(q2_id, property_name="answer", property_value="a2")
-    graph_store.set_property(q3_id, property_name="", property_value=["c3_1", "c3_2"])
+    graph_store.set_property(q3_id, property_name="context", property_value=json.dumps(["c3_1", "c3_2"]))
 
     assert graph_store.get_entity(q2_id)["answer"] == "a2"
-    assert graph_store.get_entity(q3_id)["context"] == ["c3_1", "c3_2"]
+    assert json.loads(graph_store.get_entity(q3_id)["context"]) == ["c3_1", "c3_2"]
 
     print(f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest")
     print("MATCH (A)-[r]->(B) RETURN *;")

From 6d33b699012eb67666fd5b5c2e45a84a2276e6ac Mon Sep 17 00:00:00 2001
From: Egor Kraev
Date: Fri, 26 Apr 2024 11:57:49 +0200
Subject: [PATCH 11/20] Question prioritizer first cut

---
 .../research_agent/question_prioritizer.py   | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 examples/research_agent/question_prioritizer.py

diff --git a/examples/research_agent/question_prioritizer.py b/examples/research_agent/question_prioritizer.py
new file mode 100644
index 00000000..846f9296
--- /dev/null
+++ b/examples/research_agent/question_prioritizer.py
@@ -0,0 +1,30 @@
+from langchain.prompts import PromptTemplate
+
+from motleycrew.tool import LLMTool
+
+prompt = PromptTemplate.from_template(
+    """You are provided with the following list of questions:
+{unanswered_questions} \n
+Your task is to choose one question from the above list
+that is the most pertinent to the following query:\n
+'{original_question}' \n
+Respond with one question out of the provided list of questions.
+Return the question as it is without any edits."""
+)
+
+prioritizer = LLMTool(
+    name="Question prioritizer",
+    description="""Takes the original question and a list of derived questions,
+and selects from the latter the one most pertinent to the former.""",
+    prompt=prompt,
+)
+
+
+if __name__ == "__main__":
+    q = "What color is the sky?"
+ unanswered = ["What time of day is it?", "Who was H.P.Lovecraft?"] + out = prioritizer.invoke( + {"unanswered_questions": str(unanswered), "original_question": q} + ) + print(out) + print("yay!") From 330e692bde1b574919c893f2d96733a440ee1d4f Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 11:58:47 +0200 Subject: [PATCH 12/20] Question prioritizer first cut --- examples/research_agent/question_answerer.py | 0 motleycrew/agent/coordinator.py | 10 ----- motleycrew/tool/__init__.py | 2 + motleycrew/tool/llm_tool.py | 40 +++++++++++--------- 4 files changed, 24 insertions(+), 28 deletions(-) create mode 100644 examples/research_agent/question_answerer.py delete mode 100644 motleycrew/agent/coordinator.py diff --git a/examples/research_agent/question_answerer.py b/examples/research_agent/question_answerer.py new file mode 100644 index 00000000..e69de29b diff --git a/motleycrew/agent/coordinator.py b/motleycrew/agent/coordinator.py deleted file mode 100644 index 237325b7..00000000 --- a/motleycrew/agent/coordinator.py +++ /dev/null @@ -1,10 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Sequence - -from motleycrew.task import Task - - -class TaskCoordinator(ABC): - @abstractmethod - def order(self, tasks: Sequence[Task]) -> Sequence[Task]: - pass diff --git a/motleycrew/tool/__init__.py b/motleycrew/tool/__init__.py index 84596777..a9e7456e 100644 --- a/motleycrew/tool/__init__.py +++ b/motleycrew/tool/__init__.py @@ -1 +1,3 @@ from .tool import MotleyTool +from .llm_tool import LLMTool +from .image_generation import DallEImageGeneratorTool diff --git a/motleycrew/tool/llm_tool.py b/motleycrew/tool/llm_tool.py index 1b024abd..d2567303 100644 --- a/motleycrew/tool/llm_tool.py +++ b/motleycrew/tool/llm_tool.py @@ -1,10 +1,10 @@ -from typing import Optional +from typing import Optional, Type -from langchain_core.tools import Tool +from langchain_core.tools import StructuredTool from langchain_core.prompts import PromptTemplate from langchain_core.prompts.base import BasePromptTemplate from langchain_core.language_models import BaseLanguageModel -from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.pydantic_v1 import BaseModel, Field, create_model from motleycrew.tool import MotleyTool from motleycrew.common import LLMFramework @@ -18,10 +18,14 @@ def __init__( description: str, prompt: str | BasePromptTemplate, llm: Optional[BaseLanguageModel] = None, - input_description: Optional[str] = "Input for the tool.", + input_schema: Optional[Type[BaseModel]] = None, ): langchain_tool = create_llm_langchain_tool( - name=name, description=description, prompt=prompt, llm=llm, input_description=input_description + name=name, + description=description, + prompt=prompt, + llm=llm, + input_schema=input_schema, ) super().__init__(langchain_tool) @@ -30,8 +34,8 @@ def create_llm_langchain_tool( name: str, description: str, prompt: str | BasePromptTemplate, - llm: Optional[BaseLanguageModel], - input_description: Optional[str], + llm: Optional[BaseLanguageModel] = None, + input_schema: Optional[Type[BaseModel]] = None, ): if llm is None: llm = init_llm(llm_framework=LLMFramework.LANGCHAIN) @@ -39,22 +43,22 @@ def create_llm_langchain_tool( if not isinstance(prompt, BasePromptTemplate): prompt = PromptTemplate.from_template(prompt) - assert len(prompt.input_variables) == 1, "Prompt must contain exactly one input variable" - input_var = prompt.input_variables[0] + if input_schema is None: + fields = { + var: (str, Field(description=f"Input {var} for the 
tool.")) + for var in prompt.input_variables + } - class LLMToolInput(BaseModel): - """Input for the tool.""" + # Create the LLMToolInput class dynamically + input_schema = create_model("LLMToolInput", **fields) - # TODO: how hard is it to get that name from prompt.input_variables? - input: str = Field(description=input_description) - - def call_llm(input: str) -> str: + def call_llm(**kwargs) -> str: chain = prompt | llm - return chain.invoke({input_var: input}) + return chain.invoke(kwargs) - return Tool.from_function( + return StructuredTool.from_function( func=call_llm, name=name, description=description, - args_schema=LLMToolInput, + args_schema=input_schema, ) From 1876740585a4671e5f055bbf841244e62abcaad6 Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 26 Apr 2024 15:30:50 +0400 Subject: [PATCH 13/20] Knowledge gaining orchestrator working implementation --- examples/research_agent/question_generator.py | 37 +++++---- examples/research_agent/question_inserter.py | 75 +++++++++++++++++++ examples/research_agent/question_struct.py | 35 +++++++++ examples/research_agent/research_agent.py | 48 ++++++++++-- motleycrew/storage/__init__.py | 2 + motleycrew/storage/graph_store.py | 29 +++++++ motleycrew/storage/kuzu_graph_store.py | 8 +- motleycrew/tool/question_insertion_tool.py | 74 ------------------ 8 files changed, 203 insertions(+), 105 deletions(-) create mode 100644 examples/research_agent/question_inserter.py create mode 100644 examples/research_agent/question_struct.py create mode 100644 motleycrew/storage/graph_store.py delete mode 100644 motleycrew/tool/question_insertion_tool.py diff --git a/examples/research_agent/question_generator.py b/examples/research_agent/question_generator.py index 9a2223d1..566e1451 100644 --- a/examples/research_agent/question_generator.py +++ b/examples/research_agent/question_generator.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Dict, Any +from typing import Optional, Any import json from pathlib import Path @@ -6,7 +6,6 @@ from langchain_core.runnables import ( RunnablePassthrough, RunnableLambda, - RunnableParallel, ) from langchain_core.tools import Tool from langchain_core.prompts.base import BasePromptTemplate @@ -14,19 +13,20 @@ from langchain_core.pydantic_v1 import BaseModel, Field -# TODO: fallback interface if LlamaIndex is not available -from llama_index.core.graph_stores.types import GraphStore - from motleycrew.tool import MotleyTool from motleycrew.common import LLMFramework from motleycrew.common.llms import init_llm -from motleycrew.tool.question_insertion_tool import QuestionInsertionTool from motleycrew.common.utils import print_passthrough +from motleycrew.storage import MotleyGraphStore + +from question_struct import Question +from question_inserter import QuestionInsertionTool + default_prompt = PromptTemplate.from_template( """ You are a part of a team. The ultimate goal of your team is to -answer the following Question: '{question}'.\n +answer the following Question: '{question_text}'.\n Your team has discovered some new text (delimited by ```) that may be relevant to your ultimate goal. text: \n ``` {context} ``` \n Your task is to ask new questions that may help your team achieve the ultimate goal. 
@@ -57,7 +57,7 @@ class QuestionGeneratorTool(MotleyTool): def __init__( self, query_tool: MotleyTool, - graph: GraphStore, + graph: MotleyGraphStore, max_questions: int = 3, llm: Optional[BaseLanguageModel] = None, prompt: str | BasePromptTemplate = None, @@ -76,14 +76,12 @@ def __init__( class QuestionGeneratorToolInput(BaseModel): """Input for the Question Generator Tool.""" - question: str = Field( - description="The input question for which to generate subquestions." - ) + question: Question = Field(description="The input question for which to generate subquestions.") def create_question_generator_langchain_tool( query_tool: MotleyTool, - graph: GraphStore, + graph: MotleyGraphStore, max_questions: int = 3, llm: Optional[BaseLanguageModel] = None, prompt: str | BasePromptTemplate = None, @@ -98,14 +96,10 @@ def create_question_generator_langchain_tool( elif isinstance(prompt, str): prompt = PromptTemplate.from_template(prompt) - assert isinstance( - prompt, BasePromptTemplate - ), "Prompt must be a string or a BasePromptTemplate" + assert isinstance(prompt, BasePromptTemplate), "Prompt must be a string or a BasePromptTemplate" - def partial_inserter(question: dict[str, str]): - out = QuestionInsertionTool( - graph=graph, question=question["question"] - ).to_langchain_tool() + def partial_inserter(question: Question): + out = QuestionInsertionTool(graph=graph, question=question).to_langchain_tool() return (out,) def insert_questions(input_dict) -> None: @@ -124,7 +118,10 @@ def insert_questions(input_dict) -> None: } | RunnableLambda(print_passthrough) | { - "subquestions": prompt.partial(num_questions=max_questions) | llm, + "subquestions": RunnablePassthrough.assign(question_text=lambda x: x["question"]["question"].question) + | RunnableLambda(print_passthrough) + | prompt.partial(num_questions=max_questions) + | llm, "question_inserter": RunnablePassthrough(), } | RunnableLambda(insert_questions) diff --git a/examples/research_agent/question_inserter.py b/examples/research_agent/question_inserter.py new file mode 100644 index 00000000..4913452f --- /dev/null +++ b/examples/research_agent/question_inserter.py @@ -0,0 +1,75 @@ +from typing import List + +from pathlib import Path + +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.tools import Tool + +from motleycrew.storage import MotleyGraphStore +from motleycrew.tool import MotleyTool + +from question_struct import Question + + +IS_SUBQUESTION_PREDICATE = "is_subquestion" + + +class QuestionInsertionTool(MotleyTool): + def __init__(self, question: Question, graph: MotleyGraphStore): + + langchain_tool = create_question_insertion_langchain_tool( + name="Question Insertion Tool", + description="Insert a list of questions (supplied as a list of strings) into the graph.", + question=question, + graph=graph, + ) + + super().__init__(langchain_tool) + + +class QuestionInsertionToolInput(BaseModel): + """Subquestions of the current question, to be inserted into the knowledge graph.""" + + questions: List[str] = Field(description="List of questions to be inserted into the knowledge graph.") + + +def create_question_insertion_langchain_tool( + name: str, + description: str, + question: Question, + graph: MotleyGraphStore, +): + def insert_questions(questions: list[str]) -> None: + for subquestion in questions: + subquestion_data = graph.create_entity(Question(question=subquestion).serialize()) + subquestion_obj = Question.deserialize(subquestion_data) + graph.create_rel(from_id=question.id, 
to_id=subquestion_obj.id, predicate=IS_SUBQUESTION_PREDICATE)
+
+    return Tool.from_function(
+        func=insert_questions,
+        name=name,
+        description=description,
+        args_schema=QuestionInsertionToolInput,
+    )
+
+
+if __name__ == "__main__":
+    import kuzu
+    from motleycrew.storage import MotleyKuzuGraphStore
+
+    here = Path(__file__).parent
+    db_path = here / "test1"
+    db = kuzu.Database(db_path)
+    graph_store = MotleyKuzuGraphStore(
+        db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"}
+    )
+
+    question_data = graph_store.create_entity(Question(question="What is the capital of France?").serialize())
+    question = Question.deserialize(question_data)
+
+    children = ["What is the capital of France?", "What is the capital of Germany?"]
+    tool = QuestionInsertionTool(question=question, graph=graph_store)
+    tool.invoke({"questions": children})
+
+    print(f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest")
+    print("MATCH (A)-[r]->(B) RETURN *;")
diff --git a/examples/research_agent/question_struct.py b/examples/research_agent/question_struct.py
new file mode 100644
index 00000000..a460586d
--- /dev/null
+++ b/examples/research_agent/question_struct.py
@@ -0,0 +1,35 @@
+from typing import Optional
+from dataclasses import dataclass
+import json
+
+
+@dataclass
+class Question:
+    id: Optional[int] = None
+    question: Optional[str] = None
+    answer: Optional[str] = None
+    context: Optional[list[str]] = None
+
+    def serialize(self):
+        data = {}
+
+        if self.id:
+            data["id"] = json.dumps(self.id)
+        if self.question:
+            data["question"] = json.dumps(self.question)
+        if self.answer:
+            data["answer"] = json.dumps(self.answer)
+        if self.context:
+            data["context"] = json.dumps(self.context)
+
+        return data
+
+    @staticmethod
+    def deserialize(data: dict):
+        context_raw = data["context"]
+        if context_raw:
+            context = json.loads(context_raw)
+        else:
+            context = None
+
+        return Question(id=data["id"], question=data["question"], answer=data["answer"], context=context)
diff --git a/examples/research_agent/research_agent.py b/examples/research_agent/research_agent.py
index 491d9846..fc9c6285 100644
--- a/examples/research_agent/research_agent.py
+++ b/examples/research_agent/research_agent.py
@@ -3,10 +3,16 @@
 import kuzu
 
 from langchain.prompts import PromptTemplate
+from langchain.tools import Tool
 
+from motleycrew import MotleyTool
 from motleycrew.storage import MotleyKuzuGraphStore
 from motleycrew.tool.llm_tool import LLMTool
 
+from question_struct import Question
+from question_generator import QuestionGeneratorTool
+from question_generator import QuestionGeneratorToolInput
+
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 
 
@@ -27,20 +33,21 @@ class KnowledgeGainingOrchestrator:
-    def __init__(self, db_path: str):
+    def __init__(self, db_path: str, query_tool: MotleyTool):
         self.db = kuzu.Database(db_path)
         self.storage = MotleyKuzuGraphStore(
             self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"}
         )
+        self.query_tool = query_tool
 
         self.question_prioritization_tool = LLMTool(
             name="question_prioritization_tool",
             description="find the most important question",
             prompt=QUESTION_PRIORITIZATION_TEMPLATE,
         )
-        self.question_generation_tool = None
+        self.question_generation_tool = QuestionGeneratorTool(query_tool=query_tool, graph=self.storage)
 
-    def get_unanswered_questions(self, only_without_children: bool = False) -> list[dict]:
+    def get_unanswered_questions(self, only_without_children: bool = False) -> list[Question]:
         if 
only_without_children: query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name @@ -49,7 +56,7 @@ def get_unanswered_questions(self, only_without_children: bool = False) -> list[ query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) query_result = self.storage.run_query(query) - return [row[0] for row in query_result] # flatten + return [Question.deserialize(row[0]) for row in query_result] def __call__(self, query: str, max_iter: int): self.storage.create_entity({"question": query}) @@ -60,25 +67,50 @@ def __call__(self, query: str, max_iter: int): unanswered_questions = self.get_unanswered_questions(only_without_children=True) logging.info("Loaded unanswered questions: %s", unanswered_questions) - tool_input = "\n".join(f"{i}. {question}" for i, question in enumerate(unanswered_questions)) - most_pertinent_question_raw = self.question_prioritization_tool.invoke(tool_input) + question_prioritization_tool_input = { + "unanswered_questions": "\n".join( + f"{i}. {question.question}" for i, question in enumerate(unanswered_questions) + ), + "original_question": query, + } + most_pertinent_question_raw = self.question_prioritization_tool.invoke( + question_prioritization_tool_input + ).content logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) + i = int(i) assert i < len(unanswered_questions) most_pertinent_question = unanswered_questions[i] - assert most_pertinent_question_text.strip() == most_pertinent_question["question"].strip() + assert most_pertinent_question_text.strip() == most_pertinent_question.question.strip() logging.info("Generating new questions") + self.question_generation_tool.invoke({"question": most_pertinent_question}) if __name__ == "__main__": from pathlib import Path import shutil + from dotenv import load_dotenv + load_dotenv() here = Path(__file__).parent db_path = here / "research_db" shutil.rmtree(db_path, ignore_errors=True) - orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path)) + query_tool = MotleyTool.from_langchain_tool( + Tool.from_function( + func=lambda question: [ + "Germany has consisted of many different states over the years", + "The capital of France has moved in 1815, from Lyons to Paris", + "France actually has two capitals, one in the north and one in the south", + ], + name="Query Tool", + description="Query the library for relevant information.", + args_schema=QuestionGeneratorToolInput, + ) + ) + + orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path), query_tool=query_tool) + orchestrator(query="Why did Arjuna kill his step-brother?", max_iter=5) diff --git a/motleycrew/storage/__init__.py b/motleycrew/storage/__init__.py index 1f544aec..518564ea 100644 --- a/motleycrew/storage/__init__.py +++ b/motleycrew/storage/__init__.py @@ -1 +1,3 @@ +from .graph_store import MotleyGraphStore + from .kuzu_graph_store import MotleyKuzuGraphStore diff --git a/motleycrew/storage/graph_store.py b/motleycrew/storage/graph_store.py new file mode 100644 index 00000000..fd7fe6b2 --- /dev/null +++ b/motleycrew/storage/graph_store.py @@ -0,0 +1,29 @@ +from abc import ABC, abstractmethod +from typing import Optional, Any + + +class MotleyGraphStore(ABC): + @abstractmethod + def check_entity_exists(self, entity_id: int) -> bool: + pass + + @abstractmethod + def 
get_entity(self, entity_id: int) -> Optional[dict]: + pass + + @abstractmethod + def create_entity(self, entity: dict) -> dict: + """Create a new entity and return it""" + pass + + @abstractmethod + def create_rel(self, from_id: int, to_id: int, predicate: str) -> None: + pass + + @abstractmethod + def delete_entity(self, entity_id: int) -> None: + """Delete a given entity and its relations""" + pass + + def set_property(self, entity_id: int, property_name: str, property_value: Any): + pass diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py index 96b9f48f..31ecbe44 100644 --- a/motleycrew/storage/kuzu_graph_store.py +++ b/motleycrew/storage/kuzu_graph_store.py @@ -7,8 +7,10 @@ import kuzu +from motleycrew.storage import MotleyGraphStore -class MotleyKuzuGraphStore: + +class MotleyKuzuGraphStore(MotleyGraphStore): def __init__( self, database: Any, @@ -81,11 +83,11 @@ def _dict_to_cypher_mapping_with_parameters(entity: dict) -> tuple[str, dict]: cypher_mapping = cypher_mapping.rstrip(", ") + "}" return cypher_mapping, parameters - def create_entity(self, entity: dict) -> int: + def create_entity(self, entity: dict) -> dict: """Create a new entity and return its id""" cypher_mapping, parameters = MotleyKuzuGraphStore._dict_to_cypher_mapping_with_parameters(entity) create_result = self.connection.execute( - "CREATE (n:{} {}) RETURN n.id".format(self.node_table_name, cypher_mapping), parameters=parameters + "CREATE (n:{} {}) RETURN n".format(self.node_table_name, cypher_mapping), parameters=parameters ) assert create_result.has_next() return create_result.get_next()[0] diff --git a/motleycrew/tool/question_insertion_tool.py b/motleycrew/tool/question_insertion_tool.py deleted file mode 100644 index 3d2f57f2..00000000 --- a/motleycrew/tool/question_insertion_tool.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import List - -from pathlib import Path - -from langchain_core.pydantic_v1 import BaseModel, Field -from langchain_core.tools import Tool - -# TODO: fallback interface if LlamaIndex is not available -from llama_index.core.graph_stores.types import GraphStore - -from motleycrew.tool import MotleyTool - - -class QuestionInsertionTool(MotleyTool): - def __init__(self, question: str, graph: GraphStore): - - langchain_tool = create_question_insertion_langchain_tool( - name="Question Insertion Tool", - description="Insert a list of questions (supplied as a list of strings) into the graph.", - question=question, - graph=graph, - ) - - super().__init__(langchain_tool) - - -class QuestionInsertionToolInput(BaseModel): - """Subquestions of the current question, to be inserted into the knowledge graph.""" - - questions: List[str] = Field( - description="List of questions to be inserted into the knowledge graph." - ) - - -def create_question_insertion_langchain_tool( - name: str, - description: str, - question: str, - graph: GraphStore, -): - def insert_questions(questions: list[str]) -> None: - for subquestion in questions: - # TODO: change! 
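For orientation, the abstract MotleyGraphStore interface introduced above is easiest to grasp from a toy implementation. The following is a standalone sketch only, not part of this patch set, with all names hypothetical:

    from typing import Any, Optional

    class InMemoryGraphStore:
        """Hypothetical dict-backed stand-in for MotleyGraphStore (illustration only)."""

        def __init__(self):
            self._next_id = 0
            self._entities: dict[int, dict] = {}
            self._rels: list[tuple[int, int, str]] = []

        def check_entity_exists(self, entity_id: int) -> bool:
            return entity_id in self._entities

        def get_entity(self, entity_id: int) -> Optional[dict]:
            return self._entities.get(entity_id)

        def create_entity(self, entity: dict) -> dict:
            # Assign an autoincrementing id, store the entity, and return it
            entity = {**entity, "id": self._next_id}
            self._entities[self._next_id] = entity
            self._next_id += 1
            return entity

        def create_rel(self, from_id: int, to_id: int, predicate: str) -> None:
            self._rels.append((from_id, to_id, predicate))

        def delete_entity(self, entity_id: int) -> None:
            # Drop the entity together with any relations touching it
            self._entities.pop(entity_id, None)
            self._rels = [r for r in self._rels if entity_id not in (r[0], r[1])]

        def set_property(self, entity_id: int, property_name: str, property_value: Any) -> None:
            self._entities[entity_id][property_name] = property_value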
This is a placeholder implementation - graph.upsert_triplet(question, "IS_SUBQUESTION", subquestion) - - return Tool.from_function( - func=insert_questions, - name=name, - description=description, - args_schema=QuestionInsertionToolInput, - ) - - -if __name__ == "__main__": - import kuzu - from llama_index.graph_stores.kuzu import KuzuGraphStore - - here = Path(__file__).parent - db_path = here / "test1" - db = kuzu.Database(db_path) - graph_store = KuzuGraphStore(db) - - children_1 = ["What is the capital of France?", "What is the capital of Germany?"] - children_2 = ["What is the capital of Italy?", "What is the capital of Spain?"] - tool = QuestionInsertionTool(question="Starting question", graph=graph_store) - tool.invoke({"questions": children_1}) - tool2 = QuestionInsertionTool( - question="What is the capital of France?", graph=graph_store - ) - tool2.invoke({"questions": children_2}) - print( - f"docker run -p 8000:8000 -v {db_path}:/database --rm kuzudb/explorer: latest" - ) - print("MATCH (A)-[r]->(B) RETURN *;") From e3a4562e728ca57c4886670498733c9bf19d2e75 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 14:08:13 +0200 Subject: [PATCH 14/20] Draft question answerer --- .../research_agent/answer_orchestrator.py | 16 ++ examples/research_agent/question_answerer.py | 138 ++++++++++++++++++ .../research_agent/question_orchestrator.py | 84 +++++++++++ examples/research_agent/research_agent.py | 96 +++--------- 4 files changed, 258 insertions(+), 76 deletions(-) create mode 100644 examples/research_agent/answer_orchestrator.py create mode 100644 examples/research_agent/question_orchestrator.py diff --git a/examples/research_agent/answer_orchestrator.py b/examples/research_agent/answer_orchestrator.py new file mode 100644 index 00000000..297b4531 --- /dev/null +++ b/examples/research_agent/answer_orchestrator.py @@ -0,0 +1,16 @@ +from llama_index.core.graph_stores.types import GraphStore + +from .question_answerer import AnswerSubQuestionTool + + +def answer_orchestrator(graph: GraphStore): + last_question = None + answerer = AnswerSubQuestionTool(graph=graph) + while True: + questions = graph.get_unanswered_available_questions() + if not len(questions): + return last_question + else: + last_question = questions[0] + answerer.invoke({"question": last_question}) + return graph.retrieve(last_question.id) diff --git a/examples/research_agent/question_answerer.py b/examples/research_agent/question_answerer.py index e69de29b..a24a2a4f 100644 --- a/examples/research_agent/question_answerer.py +++ b/examples/research_agent/question_answerer.py @@ -0,0 +1,138 @@ +from typing import Optional, List, Tuple + + +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain.prompts import PromptTemplate +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.tools import StructuredTool, Tool +from langchain_core.runnables import ( + RunnablePassthrough, + RunnableLambda, + RunnableParallel, + chain, +) + +from llama_index.core.graph_stores.types import GraphStore +from motleycrew.tool import MotleyTool, LLMTool +from motleycrew.common.utils import print_passthrough + +_default_prompt = PromptTemplate.from_template( + """ + You are a research agent who answers complex questions with clear, crisp and detailed answers. + You are provided with a question and some research notes prepared by your team. 
+ Question: {question} \n + Notes: {notes} \n + Your task is to answer the question entirely based on the given notes. + The notes contain a list of intermediate-questions and answers that may be helpful to you in writing an answer. + Use only the most relevant information from the notes while writing your answer. + Do not use any prior knowledge while writing your answer, Do not make up the answer. + If the notes are not relevant to the question, just return 'Context is insufficient to answer the question'. + Remember your goal is to answer the question as objectively as possible. + Write your answer succinctly in less than {answer_length} words.""" +) + + +class AnswerSubQuestionTool(MotleyTool): + def __init__( + self, + graph: GraphStore, + prompt: str | BasePromptTemplate = None, + ): + langchain_tool = create_answer_question_langchain_tool( + graph=graph, + prompt=prompt, + ) + + super().__init__(langchain_tool) + + +class QuestionAnswererInput(BaseModel): + """Data on the question to answer.""" + + question_id: int = Field( + description="Id of the question node to process.", + ) + notes: str = Field( + description="The notes that contain the sub-questions and their answers.", + ) + question: str = Field( + description="The question to answer.", + ) + + +def create_answer_question_langchain_tool( + graph: GraphStore, + prompt: str | BasePromptTemplate = None, +) -> Tool: + """ + Creates a LangChainTool for the AnswerSubQuestionTool. + """ + if prompt is None: + prompt = _default_prompt + + subquestion_answerer = LLMTool( + prompt=prompt, + name="Question answerer", + description="Tool to answer a question from notes and sub-questions", + ) + """ + Gets a valid question node ID, question, and context as input dict + Retrieves child quuestion answers + Feeds all that to LLM to answer Q (research_agent prompt) + Attaches answer to the node + """ + + @chain + def retrieve_sub_question_answers(**kwargs) -> List[Tuple[str, str]]: + """ + Retrieves the answers to the sub-questions of a given question. + """ + sub_questions = graph.get_sub_questions(kwargs["question_id"]) + out = [] + for sq in sub_questions: + if sq["answer"] is not None: + out.append((sq["question"], sq["answer"])) + return out + + @chain + def merge_notes(**kwargs) -> str: + """ + Merges the notes and the sub-question answers. + """ + notes = kwargs["notes"] + sub_question_answers = kwargs["sub_question_answers"] + notes += "\n\n" + for q, a in sub_question_answers: + notes += f"Q: {q}\nA: {a}\n\n" + return notes + + @chain + def insert_answer(answer: str, question_id: int) -> None: + """ + Inserts the answer into the graph. 
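The helper functions in this file lean on LangChain's @chain decorator, which wraps a plain function into a Runnable so it composes with the | operator. A minimal, self-contained sketch of the pattern (illustration only, not part of the patch):

    from langchain_core.runnables import chain

    @chain
    def shout(text: str) -> str:
        return text.upper()

    @chain
    def exclaim(text: str) -> str:
        return text + "!"

    pipeline = shout | exclaim       # Runnables compose left to right
    print(pipeline.invoke("hello"))  # -> "HELLO!"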
+ """ + graph.update_property(id=question_id, name="answer", value=answer) + + this_chain = ( + { + "sub_question_answers": retrieve_sub_question_answers, + "input": RunnablePassthrough(), + } + | merge_notes + | { + "answer": subquestion_answerer.to_langchain_tool(), + "question_id": RunnablePassthrough(), + } + | RunnableLambda(print_passthrough) + | insert_answer + ) + + langchain_tool = Tool.from_function( + func=this_chain.invoke, + name="Answer Sub-Question Tool", + description="Answer a question based on the notes and sub-questions.", + args_schema=QuestionAnswererInput, + ) + + return langchain_tool diff --git a/examples/research_agent/question_orchestrator.py b/examples/research_agent/question_orchestrator.py new file mode 100644 index 00000000..491d9846 --- /dev/null +++ b/examples/research_agent/question_orchestrator.py @@ -0,0 +1,84 @@ +import logging +import sys +import kuzu + +from langchain.prompts import PromptTemplate + +from motleycrew.storage import MotleyKuzuGraphStore +from motleycrew.tool.llm_tool import LLMTool + +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + + +QUESTION_PRIORITIZATION_TEMPLATE = PromptTemplate( + template=( + "You are provided with the following list of questions:" + " {unanswered_questions} \n" + " Your task is to choose one question from the above list" + " that is the most pertinent to the following query:\n" + " '{original_question}' \n" + " Respond with one question out of the provided list of questions." + " Return the questions as it is without any edits." + " Format your response like:\n" + " #. question" + ), + input_variables=["unanswered_questions", "original_question"], +) + + +class KnowledgeGainingOrchestrator: + def __init__(self, db_path: str): + self.db = kuzu.Database(db_path) + self.storage = MotleyKuzuGraphStore( + self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} + ) + + self.question_prioritization_tool = LLMTool( + name="question_prioritization_tool", + description="find the most important question", + prompt=QUESTION_PRIORITIZATION_TEMPLATE, + ) + self.question_generation_tool = None + + def get_unanswered_questions(self, only_without_children: bool = False) -> list[dict]: + if only_without_children: + query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( + self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name + ) + else: + query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) + + query_result = self.storage.run_query(query) + return [row[0] for row in query_result] # flatten + + def __call__(self, query: str, max_iter: int): + self.storage.create_entity({"question": query}) + + for iter_n in range(max_iter): + logging.info("====== Iteration %s of %s ======", iter_n, max_iter) + + unanswered_questions = self.get_unanswered_questions(only_without_children=True) + logging.info("Loaded unanswered questions: %s", unanswered_questions) + + tool_input = "\n".join(f"{i}. 
{question}" for i, question in enumerate(unanswered_questions)) + most_pertinent_question_raw = self.question_prioritization_tool.invoke(tool_input) + logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) + + i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) + assert i < len(unanswered_questions) + + most_pertinent_question = unanswered_questions[i] + assert most_pertinent_question_text.strip() == most_pertinent_question["question"].strip() + + logging.info("Generating new questions") + + +if __name__ == "__main__": + from pathlib import Path + import shutil + + here = Path(__file__).parent + db_path = here / "research_db" + shutil.rmtree(db_path, ignore_errors=True) + + orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path)) diff --git a/examples/research_agent/research_agent.py b/examples/research_agent/research_agent.py index 491d9846..34b4b5f6 100644 --- a/examples/research_agent/research_agent.py +++ b/examples/research_agent/research_agent.py @@ -1,84 +1,28 @@ -import logging -import sys -import kuzu +import uuid +import os -from langchain.prompts import PromptTemplate +from dotenv import load_dotenv -from motleycrew.storage import MotleyKuzuGraphStore -from motleycrew.tool.llm_tool import LLMTool -logging.basicConfig(stream=sys.stdout, level=logging.INFO) +from langchain.embeddings import OpenAIEmbeddings +from langchain.vectorstores import PGVector +load_dotenv("../.env") -QUESTION_PRIORITIZATION_TEMPLATE = PromptTemplate( - template=( - "You are provided with the following list of questions:" - " {unanswered_questions} \n" - " Your task is to choose one question from the above list" - " that is the most pertinent to the following query:\n" - " '{original_question}' \n" - " Respond with one question out of the provided list of questions." - " Return the questions as it is without any edits." - " Format your response like:\n" - " #. 
question" - ), - input_variables=["unanswered_questions", "original_question"], +DB_PASSWORD = os.environ["SUPABASE_PASSWORD"] +DB_DBUSER = os.environ["SUPABASE_DBUSER"] +DB_DATABASE = os.environ["SUPABASE_DATABASE"] +DB_HOST = os.environ["SUPABASE_HOST"] +DB_PORT = os.environ["SUPABASE_PORT"] +DB_CONN_STRING = ( + f"postgresql://{DB_DBUSER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}" ) +text_embedding_model = "text-embedding-ada-002" +embeddings = OpenAIEmbeddings(model=text_embedding_model) -class KnowledgeGainingOrchestrator: - def __init__(self, db_path: str): - self.db = kuzu.Database(db_path) - self.storage = MotleyKuzuGraphStore( - self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} - ) - - self.question_prioritization_tool = LLMTool( - name="question_prioritization_tool", - description="find the most important question", - prompt=QUESTION_PRIORITIZATION_TEMPLATE, - ) - self.question_generation_tool = None - - def get_unanswered_questions(self, only_without_children: bool = False) -> list[dict]: - if only_without_children: - query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( - self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name - ) - else: - query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) - - query_result = self.storage.run_query(query) - return [row[0] for row in query_result] # flatten - - def __call__(self, query: str, max_iter: int): - self.storage.create_entity({"question": query}) - - for iter_n in range(max_iter): - logging.info("====== Iteration %s of %s ======", iter_n, max_iter) - - unanswered_questions = self.get_unanswered_questions(only_without_children=True) - logging.info("Loaded unanswered questions: %s", unanswered_questions) - - tool_input = "\n".join(f"{i}. {question}" for i, question in enumerate(unanswered_questions)) - most_pertinent_question_raw = self.question_prioritization_tool.invoke(tool_input) - logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) - - i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) - assert i < len(unanswered_questions) - - most_pertinent_question = unanswered_questions[i] - assert most_pertinent_question_text.strip() == most_pertinent_question["question"].strip() - - logging.info("Generating new questions") - - -if __name__ == "__main__": - from pathlib import Path - import shutil - - here = Path(__file__).parent - db_path = here / "research_db" - shutil.rmtree(db_path, ignore_errors=True) - - orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path)) +mahabharata_store = PGVector( + collection_name="mahabharata", + connection_string=DB_CONN_STRING, + embedding_function=embeddings, +) From e8deaec073e458f05542495c1d3c4cfcc96c3c85 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 14:32:46 +0200 Subject: [PATCH 15/20] tweak update_properties --- examples/research_agent/question_answerer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_agent/question_answerer.py b/examples/research_agent/question_answerer.py index a24a2a4f..e87a42d0 100644 --- a/examples/research_agent/question_answerer.py +++ b/examples/research_agent/question_answerer.py @@ -112,7 +112,7 @@ def insert_answer(answer: str, question_id: int) -> None: """ Inserts the answer into the graph. 
""" - graph.update_property(id=question_id, name="answer", value=answer) + graph.update_properties(id=question_id, values={"answer": answer}) this_chain = ( { From 4df19832e9ed51aeac037425998241a5422f4970 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 15:15:14 +0200 Subject: [PATCH 16/20] Ingestion sort of works --- .../research_agent/research_agent_main.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 examples/research_agent/research_agent_main.py diff --git a/examples/research_agent/research_agent_main.py b/examples/research_agent/research_agent_main.py new file mode 100644 index 00000000..3623e857 --- /dev/null +++ b/examples/research_agent/research_agent_main.py @@ -0,0 +1,43 @@ +import os.path + +import pandas as pd + +from llama_index.core.node_parser import SentenceSplitter +from llama_index.embeddings.openai import OpenAIEmbedding + +from llama_index.core import ( + VectorStoreIndex, + SimpleDirectoryReader, + StorageContext, + load_index_from_storage, +) + +text_embedding_model = "text-embedding-ada-002" +embeddings = OpenAIEmbedding(model=text_embedding_model) + +# check if storage already exists +PERSIST_DIR = "./storage" +here = os.path.dirname(os.path.abspath(__file__)) +root = os.path.realpath(os.path.join(here, "../../..")) +DATA_DIR = os.path.join(root, "mahabharata/text/TinyTales") +if not os.path.exists(PERSIST_DIR): + # load the documents and create the index + documents = SimpleDirectoryReader(DATA_DIR).load_data() + index = VectorStoreIndex.from_documents( + documents, transformations=[SentenceSplitter(chunk_size=512), embeddings] + ) + # store it for later + index.storage_context.persist(persist_dir=PERSIST_DIR) +else: + # load the existing index + storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR) + index = load_index_from_storage(storage_context) + +# Either way we can now query the index +query_engine = index.as_query_engine( + similarity_top_k=10, embeddings=embeddings, response_mode="tree_summarize" +) +response = query_engine.query( + "What are the most interesting facts about Arjuna?", +) +print(response) From e0b5ea443df3f6d42ac1a1dfb91f2fe5a34f32ff Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 26 Apr 2024 17:47:30 +0400 Subject: [PATCH 17/20] WIP question answerer --- .../research_agent/answer_orchestrator.py | 78 +++++++++++- examples/research_agent/question_answerer.py | 14 ++- examples/research_agent/question_generator.py | 6 +- .../research_agent/question_orchestrator.py | 87 +++++++------ .../research_agent/question_prioritizer.py | 40 +++--- examples/research_agent/question_struct.py | 4 +- examples/research_agent/research_agent.py | 116 ------------------ motleycrew/storage/graph_store.py | 9 ++ 8 files changed, 172 insertions(+), 182 deletions(-) diff --git a/examples/research_agent/answer_orchestrator.py b/examples/research_agent/answer_orchestrator.py index 297b4531..361b736c 100644 --- a/examples/research_agent/answer_orchestrator.py +++ b/examples/research_agent/answer_orchestrator.py @@ -1,6 +1,82 @@ from llama_index.core.graph_stores.types import GraphStore -from .question_answerer import AnswerSubQuestionTool +import logging +import sys +import kuzu + +from langchain.prompts import PromptTemplate +from langchain.tools import Tool + +from motleycrew import MotleyTool +from motleycrew.storage import MotleyGraphStore +from motleycrew.tool.llm_tool import LLMTool + +from question_struct import Question +from question_generator import QuestionGeneratorTool +from 
question_generator import QuestionGeneratorToolInput +from question_answerer import AnswerSubQuestionTool + +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + + +class AnswerOrchestrator: + def __init__(self, storage: MotleyGraphStore, query_tool: MotleyTool): + self.storage = storage + self.query_tool = query_tool + self.question_answering_tool = AnswerSubQuestionTool(graph=self.storage) + + def get_unanswered_available_questions(self) -> list[Question]: + query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( + self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name + ) + + query_result = self.storage.run_query(query) + return [Question.deserialize(row[0]) for row in query_result] + + def __call__(self): + last_question = None + + while True: + questions = self.storage.get_unanswered_available_questions() + if not len(questions): + return last_question + else: + last_question = questions[0] + answerer.invoke({"question": last_question}) + return graph.retrieve(last_question.id) + + +if __name__ == "__main__": + from pathlib import Path + import shutil + from dotenv import load_dotenv + from motleycrew.storage import MotleyKuzuGraphStore + + load_dotenv() + here = Path(__file__).parent + db_path = here / "research_db" + shutil.rmtree(db_path, ignore_errors=True) + + db = kuzu.Database(db_path) + storage = MotleyKuzuGraphStore( + db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} + ) + + query_tool = MotleyTool.from_langchain_tool( + Tool.from_function( + func=lambda question: [ + "Germany has consisted of many different states over the years", + "The capital of France has moved in 1815, from Lyons to Paris", + "France actually has two capitals, one in the north and one in the south", + ], + name="Query Tool", + description="Query the library for relevant information.", + args_schema=QuestionGeneratorToolInput, + ) + ) + + orchestrator = KnowledgeGainingOrchestrator(storage=storage, query_tool=query_tool) + orchestrator(query="Why did Arjuna kill his step-brother?", max_iter=5) def answer_orchestrator(graph: GraphStore): diff --git a/examples/research_agent/question_answerer.py b/examples/research_agent/question_answerer.py index e87a42d0..d7a7876b 100644 --- a/examples/research_agent/question_answerer.py +++ b/examples/research_agent/question_answerer.py @@ -15,8 +15,10 @@ from llama_index.core.graph_stores.types import GraphStore from motleycrew.tool import MotleyTool, LLMTool +from motleycrew.storage import MotleyGraphStore from motleycrew.common.utils import print_passthrough + _default_prompt = PromptTemplate.from_template( """ You are a research agent who answers complex questions with clear, crisp and detailed answers. 
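For readers less used to Cypher, it may help to see the query template from get_unanswered_available_questions above with concrete names substituted. The table names below are assumptions for illustration; the real defaults are defined by MotleyKuzuGraphStore:

    # Hypothetical rendering, assuming node_table_name == "question" and
    # rel_table_name == "IS_SUBQUESTION":
    template = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;"
    print(template.format("question", "IS_SUBQUESTION", "question"))
    # MATCH (n1:question) WHERE n1.answer IS NULL
    #     AND NOT (n1)-[:IS_SUBQUESTION]->(:question) RETURN n1;
    # i.e. unanswered questions that have no subquestions yet.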
@@ -36,7 +38,7 @@ class AnswerSubQuestionTool(MotleyTool): def __init__( self, - graph: GraphStore, + graph: MotleyGraphStore, prompt: str | BasePromptTemplate = None, ): langchain_tool = create_answer_question_langchain_tool( @@ -62,7 +64,7 @@ class QuestionAnswererInput(BaseModel): def create_answer_question_langchain_tool( - graph: GraphStore, + graph: MotleyGraphStore, prompt: str | BasePromptTemplate = None, ) -> Tool: """ @@ -77,10 +79,10 @@ def create_answer_question_langchain_tool( description="Tool to answer a question from notes and sub-questions", ) """ - Gets a valid question node ID, question, and context as input dict - Retrieves child quuestion answers - Feeds all that to LLM to answer Q (research_agent prompt) - Attaches answer to the node + Gets a valid question node ID, question, and context as input dict + Retrieves child question answers + Feeds all that to LLM to answer Q (research_agent prompt) + Attaches answer to the node """ @chain diff --git a/examples/research_agent/question_generator.py b/examples/research_agent/question_generator.py index 566e1451..0ea1ecad 100644 --- a/examples/research_agent/question_generator.py +++ b/examples/research_agent/question_generator.py @@ -98,8 +98,8 @@ def create_question_generator_langchain_tool( assert isinstance(prompt, BasePromptTemplate), "Prompt must be a string or a BasePromptTemplate" - def partial_inserter(question: Question): - out = QuestionInsertionTool(graph=graph, question=question).to_langchain_tool() + def partial_inserter(input_dict: dict): + out = QuestionInsertionTool(graph=graph, question=input_dict["question"]).to_langchain_tool() return (out,) def insert_questions(input_dict) -> None: @@ -119,11 +119,11 @@ def insert_questions(input_dict) -> None: | RunnableLambda(print_passthrough) | { "subquestions": RunnablePassthrough.assign(question_text=lambda x: x["question"]["question"].question) - | RunnableLambda(print_passthrough) | prompt.partial(num_questions=max_questions) | llm, "question_inserter": RunnablePassthrough(), } + | RunnableLambda(print_passthrough) | RunnableLambda(insert_questions) ) diff --git a/examples/research_agent/question_orchestrator.py b/examples/research_agent/question_orchestrator.py index 491d9846..8e629500 100644 --- a/examples/research_agent/question_orchestrator.py +++ b/examples/research_agent/question_orchestrator.py @@ -2,45 +2,27 @@ import sys import kuzu -from langchain.prompts import PromptTemplate +from langchain.tools import Tool -from motleycrew.storage import MotleyKuzuGraphStore -from motleycrew.tool.llm_tool import LLMTool - -logging.basicConfig(stream=sys.stdout, level=logging.INFO) +from motleycrew import MotleyTool +from motleycrew.storage import MotleyGraphStore +from question_struct import Question +from question_generator import QuestionGeneratorTool +from question_generator import QuestionGeneratorToolInput +from question_prioritizer import QuestionPrioritizerTool -QUESTION_PRIORITIZATION_TEMPLATE = PromptTemplate( - template=( - "You are provided with the following list of questions:" - " {unanswered_questions} \n" - " Your task is to choose one question from the above list" - " that is the most pertinent to the following query:\n" - " '{original_question}' \n" - " Respond with one question out of the provided list of questions." - " Return the questions as it is without any edits." - " Format your response like:\n" - " #. 
question" - ), - input_variables=["unanswered_questions", "original_question"], -) +logging.basicConfig(stream=sys.stdout, level=logging.INFO) class KnowledgeGainingOrchestrator: - def __init__(self, db_path: str): - self.db = kuzu.Database(db_path) - self.storage = MotleyKuzuGraphStore( - self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} - ) + def __init__(self, storage: MotleyGraphStore, query_tool: MotleyTool): + self.storage = storage + self.query_tool = query_tool + self.question_prioritization_tool = QuestionPrioritizerTool() + self.question_generation_tool = QuestionGeneratorTool(query_tool=query_tool, graph=self.storage) - self.question_prioritization_tool = LLMTool( - name="question_prioritization_tool", - description="find the most important question", - prompt=QUESTION_PRIORITIZATION_TEMPLATE, - ) - self.question_generation_tool = None - - def get_unanswered_questions(self, only_without_children: bool = False) -> list[dict]: + def get_unanswered_questions(self, only_without_children: bool = False) -> list[Question]: if only_without_children: query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name @@ -49,7 +31,7 @@ def get_unanswered_questions(self, only_without_children: bool = False) -> list[ query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) query_result = self.storage.run_query(query) - return [row[0] for row in query_result] # flatten + return [Question.deserialize(row[0]) for row in query_result] def __call__(self, query: str, max_iter: int): self.storage.create_entity({"question": query}) @@ -60,25 +42,56 @@ def __call__(self, query: str, max_iter: int): unanswered_questions = self.get_unanswered_questions(only_without_children=True) logging.info("Loaded unanswered questions: %s", unanswered_questions) - tool_input = "\n".join(f"{i}. {question}" for i, question in enumerate(unanswered_questions)) - most_pertinent_question_raw = self.question_prioritization_tool.invoke(tool_input) + question_prioritization_tool_input = { + "unanswered_questions": "\n".join( + f"{i}. 
{question.question}" for i, question in enumerate(unanswered_questions) + ), + "original_question": query, + } + most_pertinent_question_raw = self.question_prioritization_tool.invoke( + question_prioritization_tool_input + ).content logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) + i = int(i) assert i < len(unanswered_questions) most_pertinent_question = unanswered_questions[i] - assert most_pertinent_question_text.strip() == most_pertinent_question["question"].strip() + assert most_pertinent_question_text.strip() == most_pertinent_question.question.strip() logging.info("Generating new questions") + self.question_generation_tool.invoke({"question": most_pertinent_question}) if __name__ == "__main__": from pathlib import Path import shutil + from dotenv import load_dotenv + from motleycrew.storage import MotleyKuzuGraphStore + load_dotenv() here = Path(__file__).parent db_path = here / "research_db" shutil.rmtree(db_path, ignore_errors=True) - orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path)) + db = kuzu.Database(db_path) + storage = MotleyKuzuGraphStore( + db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} + ) + + query_tool = MotleyTool.from_langchain_tool( + Tool.from_function( + func=lambda question: [ + "Germany has consisted of many different states over the years", + "The capital of France has moved in 1815, from Lyons to Paris", + "France actually has two capitals, one in the north and one in the south", + ], + name="Query Tool", + description="Query the library for relevant information.", + args_schema=QuestionGeneratorToolInput, + ) + ) + + orchestrator = KnowledgeGainingOrchestrator(storage=storage, query_tool=query_tool) + orchestrator(query="What is the capital of France?", max_iter=5) diff --git a/examples/research_agent/question_prioritizer.py b/examples/research_agent/question_prioritizer.py index 846f9296..661b42f0 100644 --- a/examples/research_agent/question_prioritizer.py +++ b/examples/research_agent/question_prioritizer.py @@ -2,29 +2,35 @@ from motleycrew.tool import LLMTool -prompt = PromptTemplate.from_template( - """You are provided with the following list of questions: -{unanswered_questions} \n -Your task is to choose one question from the above list -that is the most pertinent to the following query:\n -'{original_question}' \n -Respond with one question out of the provided list of questions. -Return the question as it is without any edits.""" +PROMPT_TEMPLATE = PromptTemplate( + template=( + "You are provided with the following list of questions:" + " {unanswered_questions} \n" + " Your task is to choose one question from the above list" + " that is the most pertinent to the following query:\n" + " '{original_question}' \n" + " Respond with one question out of the provided list of questions." + " Return the questions as it is without any edits." + " Format your response like:\n" + " #. 
question" + ), + input_variables=["unanswered_questions", "original_question"], ) -prioritizer = LLMTool( - name="Question prioritizer", - description="""Takes the original question and a list of derived questions, -and selects from the latter the one mpst pertinent to the former.""", - prompt=prompt, -) + +class QuestionPrioritizerTool(LLMTool): + def __init__(self): + super().__init__( + name="Question prioritizer", + description="Takes the original question and a list of derived questions, " + "and selects from the latter the one most pertinent to the former", + prompt=PROMPT_TEMPLATE, + ) if __name__ == "__main__": q = "What color is the sky?" unanswered = ["What time of day is it?", "Who was H.P.Lovecraft?"] - out = prioritizer.invoke( - {"unanswered_questions": str(unanswered), "original_question": q} - ) + out = QuestionPrioritizerTool().invoke({"unanswered_questions": str(unanswered), "original_question": q}) print(out) print("yay!") diff --git a/examples/research_agent/question_struct.py b/examples/research_agent/question_struct.py index a460586d..5b92fd72 100644 --- a/examples/research_agent/question_struct.py +++ b/examples/research_agent/question_struct.py @@ -15,9 +15,9 @@ def serialize(self): if self.id: data["id"] = json.dumps(self.id) - if self.context: + if self.question: data["question"] = json.dumps(self.question) - if self.context: + if self.answer: data["answer"] = json.dumps(self.answer) if self.context: data["context"] = json.dumps(self.context) diff --git a/examples/research_agent/research_agent.py b/examples/research_agent/research_agent.py index fc9c6285..e69de29b 100644 --- a/examples/research_agent/research_agent.py +++ b/examples/research_agent/research_agent.py @@ -1,116 +0,0 @@ -import logging -import sys -import kuzu - -from langchain.prompts import PromptTemplate -from langchain.tools import Tool - -from motleycrew import MotleyTool -from motleycrew.storage import MotleyKuzuGraphStore -from motleycrew.tool.llm_tool import LLMTool - -from question_struct import Question -from question_generator import QuestionGeneratorTool -from question_generator import QuestionGeneratorToolInput - -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - - -QUESTION_PRIORITIZATION_TEMPLATE = PromptTemplate( - template=( - "You are provided with the following list of questions:" - " {unanswered_questions} \n" - " Your task is to choose one question from the above list" - " that is the most pertinent to the following query:\n" - " '{original_question}' \n" - " Respond with one question out of the provided list of questions." - " Return the questions as it is without any edits." - " Format your response like:\n" - " #. 
question" - ), - input_variables=["unanswered_questions", "original_question"], -) - - -class KnowledgeGainingOrchestrator: - def __init__(self, db_path: str, query_tool: MotleyTool): - self.db = kuzu.Database(db_path) - self.storage = MotleyKuzuGraphStore( - self.db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} - ) - - self.query_tool = query_tool - self.question_prioritization_tool = LLMTool( - name="question_prioritization_tool", - description="find the most important question", - prompt=QUESTION_PRIORITIZATION_TEMPLATE, - ) - self.question_generation_tool = QuestionGeneratorTool(query_tool=query_tool, graph=self.storage) - - def get_unanswered_questions(self, only_without_children: bool = False) -> list[Question]: - if only_without_children: - query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( - self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name - ) - else: - query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) - - query_result = self.storage.run_query(query) - return [Question.deserialize(row[0]) for row in query_result] - - def __call__(self, query: str, max_iter: int): - self.storage.create_entity({"question": query}) - - for iter_n in range(max_iter): - logging.info("====== Iteration %s of %s ======", iter_n, max_iter) - - unanswered_questions = self.get_unanswered_questions(only_without_children=True) - logging.info("Loaded unanswered questions: %s", unanswered_questions) - - question_prioritization_tool_input = { - "unanswered_questions": "\n".join( - f"{i}. {question.question}" for i, question in enumerate(unanswered_questions) - ), - "original_question": query, - } - most_pertinent_question_raw = self.question_prioritization_tool.invoke( - question_prioritization_tool_input - ).content - logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) - - i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) - i = int(i) - assert i < len(unanswered_questions) - - most_pertinent_question = unanswered_questions[i] - assert most_pertinent_question_text.strip() == most_pertinent_question.question.strip() - - logging.info("Generating new questions") - self.question_generation_tool.invoke({"question": most_pertinent_question}) - - -if __name__ == "__main__": - from pathlib import Path - import shutil - from dotenv import load_dotenv - - load_dotenv() - here = Path(__file__).parent - db_path = here / "research_db" - shutil.rmtree(db_path, ignore_errors=True) - - query_tool = MotleyTool.from_langchain_tool( - Tool.from_function( - func=lambda question: [ - "Germany has consisted of many different states over the years", - "The capital of France has moved in 1815, from Lyons to Paris", - "France actually has two capitals, one in the north and one in the south", - ], - name="Query Tool", - description="Query the library for relevant information.", - args_schema=QuestionGeneratorToolInput, - ) - ) - - orchestrator = KnowledgeGainingOrchestrator(db_path=str(db_path), query_tool=query_tool) - orchestrator(query="Why did Arjuna kill his step-brother?", max_iter=5) diff --git a/motleycrew/storage/graph_store.py b/motleycrew/storage/graph_store.py index fd7fe6b2..f3904007 100644 --- a/motleycrew/storage/graph_store.py +++ b/motleycrew/storage/graph_store.py @@ -3,6 +3,9 @@ class MotleyGraphStore(ABC): + node_table_name: str + rel_table_name: str + @abstractmethod def 
check_entity_exists(self, entity_id: int) -> bool: pass @@ -25,5 +28,11 @@ def delete_entity(self, entity_id: int) -> None: """Delete a given entity and its relations""" pass + @abstractmethod def set_property(self, entity_id: int, property_name: str, property_value: Any): pass + + @abstractmethod + def run_query(self, query: str, parameters: Optional[dict] = None) -> list[list]: + """Run a Cypher query and return the results in standard Python containers""" + pass From d219c0ccea668e423b1cf3d39bfc1c16e3fd97f2 Mon Sep 17 00:00:00 2001 From: Egor Kraev Date: Fri, 26 Apr 2024 16:21:54 +0200 Subject: [PATCH 18/20] Wrap ingestion as MotleyTool --- examples/research_agent/research_agent.py | 0 .../research_agent/research_agent_main.py | 40 ++-------- examples/research_agent/retriever_tool.py | 78 +++++++++++++++++++ 3 files changed, 85 insertions(+), 33 deletions(-) delete mode 100644 examples/research_agent/research_agent.py create mode 100644 examples/research_agent/retriever_tool.py diff --git a/examples/research_agent/research_agent.py b/examples/research_agent/research_agent.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/research_agent/research_agent_main.py b/examples/research_agent/research_agent_main.py index 3623e857..c72bdead 100644 --- a/examples/research_agent/research_agent_main.py +++ b/examples/research_agent/research_agent_main.py @@ -1,43 +1,17 @@ import os.path -import pandas as pd -from llama_index.core.node_parser import SentenceSplitter -from llama_index.embeddings.openai import OpenAIEmbedding - -from llama_index.core import ( - VectorStoreIndex, - SimpleDirectoryReader, - StorageContext, - load_index_from_storage, -) - -text_embedding_model = "text-embedding-ada-002" -embeddings = OpenAIEmbedding(model=text_embedding_model) +from examples.research_agent.retriever_tool import make_retriever_tool # check if storage already exists -PERSIST_DIR = "./storage" here = os.path.dirname(os.path.abspath(__file__)) root = os.path.realpath(os.path.join(here, "../../..")) DATA_DIR = os.path.join(root, "mahabharata/text/TinyTales") -if not os.path.exists(PERSIST_DIR): - # load the documents and create the index - documents = SimpleDirectoryReader(DATA_DIR).load_data() - index = VectorStoreIndex.from_documents( - documents, transformations=[SentenceSplitter(chunk_size=512), embeddings] - ) - # store it for later - index.storage_context.persist(persist_dir=PERSIST_DIR) -else: - # load the existing index - storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR) - index = load_index_from_storage(storage_context) -# Either way we can now query the index -query_engine = index.as_query_engine( - similarity_top_k=10, embeddings=embeddings, response_mode="tree_summarize" -) -response = query_engine.query( - "What are the most interesting facts about Arjuna?", +PERSIST_DIR = "./storage" + +retriever_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR) +response2 = retriever_tool.invoke( + {"question": "What are the most interesting facts about Arjuna?"} ) -print(response) +print("yay!") diff --git a/examples/research_agent/retriever_tool.py b/examples/research_agent/retriever_tool.py new file mode 100644 index 00000000..a61daffb --- /dev/null +++ b/examples/research_agent/retriever_tool.py @@ -0,0 +1,78 @@ +import os.path + +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.tools import StructuredTool + +from llama_index.core.tools import RetrieverTool + + +from llama_index.core.node_parser import SentenceSplitter +from 
llama_index.embeddings.openai import OpenAIEmbedding + +from llama_index.core import ( + VectorStoreIndex, + SimpleDirectoryReader, + StorageContext, + load_index_from_storage, +) + +from motleycrew.tool import MotleyTool + + +def make_retriever_tool(DATA_DIR, PERSIST_DIR): + text_embedding_model = "text-embedding-ada-002" + embeddings = OpenAIEmbedding(model=text_embedding_model) + + if not os.path.exists(PERSIST_DIR): + # load the documents and create the index + documents = SimpleDirectoryReader(DATA_DIR).load_data() + index = VectorStoreIndex.from_documents( + documents, transformations=[SentenceSplitter(chunk_size=512), embeddings] + ) + # store it for later + index.storage_context.persist(persist_dir=PERSIST_DIR) + else: + # load the existing index + storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR) + index = load_index_from_storage(storage_context) + + retriever = index.as_retriever( + similarity_top_k=10, + embed_model=embeddings, + ) + + class RetrieverToolInput(BaseModel): + """Input for the Retriever Tool.""" + + question: str = Field( + description="The input question for which to retrieve relevant data." + ) + + def call_retriever(question: str) -> str: + out = retriever.retrieve(question) + return out + + retriever_tool = StructuredTool.from_function( + func=call_retriever, + name="Information retriever tool", + description="Useful for running a natural language query against a" + " knowledge base and retrieving a set of relevant documents.", + args_schema=RetrieverToolInput, + ) + return MotleyTool.from_langchain_tool(retriever_tool) + + +if __name__ == "__main__": + + # check if storage already exists + here = os.path.dirname(os.path.abspath(__file__)) + root = os.path.realpath(os.path.join(here, "../../..")) + DATA_DIR = os.path.join(root, "mahabharata/text/TinyTales") + + PERSIST_DIR = "./storage" + + retriever_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR) + response2 = retriever_tool.invoke( + {"question": "What are the most interesting facts about Arjuna?"} + ) + print("yay!") From dcec705f3c6fff901eb36b27bbdabf4620e79b42 Mon Sep 17 00:00:00 2001 From: whimo Date: Sat, 27 Apr 2024 14:51:15 +0400 Subject: [PATCH 19/20] Research agent: first working implementation --- .../research_agent/answer_orchestrator.py | 86 +++++--------- examples/research_agent/question_answerer.py | 83 ++++++-------- examples/research_agent/question_generator.py | 17 ++- .../research_agent/question_orchestrator.py | 61 ++++------ .../research_agent/question_prioritizer.py | 108 +++++++++++++++--- examples/research_agent/question_struct.py | 22 +++- .../research_agent/research_agent_main.py | 59 ++++++++-- examples/research_agent/retriever_tool.py | 23 ++-- motleycrew/storage/graph_store.py | 2 +- motleycrew/storage/kuzu_graph_store.py | 12 +- 10 files changed, 277 insertions(+), 196 deletions(-) diff --git a/examples/research_agent/answer_orchestrator.py b/examples/research_agent/answer_orchestrator.py index 361b736c..0d37116c 100644 --- a/examples/research_agent/answer_orchestrator.py +++ b/examples/research_agent/answer_orchestrator.py @@ -1,92 +1,68 @@ -from llama_index.core.graph_stores.types import GraphStore - import logging -import sys import kuzu -from langchain.prompts import PromptTemplate -from langchain.tools import Tool - -from motleycrew import MotleyTool from motleycrew.storage import MotleyGraphStore -from motleycrew.tool.llm_tool import LLMTool from question_struct import Question -from question_generator import QuestionGeneratorTool -from 
question_generator import QuestionGeneratorToolInput from question_answerer import AnswerSubQuestionTool -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - class AnswerOrchestrator: - def __init__(self, storage: MotleyGraphStore, query_tool: MotleyTool): + def __init__(self, storage: MotleyGraphStore, answer_length: int): self.storage = storage - self.query_tool = query_tool - self.question_answering_tool = AnswerSubQuestionTool(graph=self.storage) + self.question_answering_tool = AnswerSubQuestionTool(graph=self.storage, answer_length=answer_length) def get_unanswered_available_questions(self) -> list[Question]: - query = "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[:{}]->(:{}) RETURN n1;".format( - self.storage.node_table_name, self.storage.rel_table_name, self.storage.node_table_name - ) - - query_result = self.storage.run_query(query) + query = ( + "MATCH (n1:{}) " + "WHERE n1.answer IS NULL AND n1.context IS NOT NULL " + "AND NOT EXISTS {{MATCH (n1)-[]->(n2:{}) " + "WHERE n2.answer IS NULL AND n2.context IS NOT NULL}} " + "RETURN n1" + ).format(self.storage.node_table_name, self.storage.node_table_name) + + query_result = self.storage.run_cypher_query(query) return [Question.deserialize(row[0]) for row in query_result] - def __call__(self): + def __call__(self) -> Question | None: last_question = None while True: - questions = self.storage.get_unanswered_available_questions() + questions = self.get_unanswered_available_questions() + logging.info("Available questions: %s", questions) + if not len(questions): - return last_question + logging.info("All questions answered!") + break else: last_question = questions[0] - answerer.invoke({"question": last_question}) - return graph.retrieve(last_question.id) + logging.info("Running answerer for question %s", last_question) + self.question_answering_tool.invoke({"question": last_question}) + + if not last_question: + logging.warning("Nothing to answer!") + return + + return Question.deserialize(self.storage.get_entity(last_question.id)) if __name__ == "__main__": from pathlib import Path - import shutil from dotenv import load_dotenv from motleycrew.storage import MotleyKuzuGraphStore + from motleycrew.common.utils import configure_logging load_dotenv() + configure_logging(verbose=True) + here = Path(__file__).parent db_path = here / "research_db" - shutil.rmtree(db_path, ignore_errors=True) db = kuzu.Database(db_path) storage = MotleyKuzuGraphStore( db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} ) - query_tool = MotleyTool.from_langchain_tool( - Tool.from_function( - func=lambda question: [ - "Germany has consisted of many different states over the years", - "The capital of France has moved in 1815, from Lyons to Paris", - "France actually has two capitals, one in the north and one in the south", - ], - name="Query Tool", - description="Query the library for relevant information.", - args_schema=QuestionGeneratorToolInput, - ) - ) - - orchestrator = KnowledgeGainingOrchestrator(storage=storage, query_tool=query_tool) - orchestrator(query="Why did Arjuna kill his step-brother?", max_iter=5) - - -def answer_orchestrator(graph: GraphStore): - last_question = None - answerer = AnswerSubQuestionTool(graph=graph) - while True: - questions = graph.get_unanswered_available_questions() - if not len(questions): - return last_question - else: - last_question = questions[0] - answerer.invoke({"question": last_question}) - return graph.retrieve(last_question.id) + orchestrator = 
AnswerOrchestrator(storage=storage, answer_length=30) + result = orchestrator() + print(result) diff --git a/examples/research_agent/question_answerer.py b/examples/research_agent/question_answerer.py index d7a7876b..e995ca9c 100644 --- a/examples/research_agent/question_answerer.py +++ b/examples/research_agent/question_answerer.py @@ -1,29 +1,25 @@ -from typing import Optional, List, Tuple - - from langchain_core.pydantic_v1 import BaseModel, Field from langchain.prompts import PromptTemplate -from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts.base import BasePromptTemplate -from langchain_core.tools import StructuredTool, Tool +from langchain_core.tools import Tool from langchain_core.runnables import ( RunnablePassthrough, RunnableLambda, - RunnableParallel, chain, ) -from llama_index.core.graph_stores.types import GraphStore from motleycrew.tool import MotleyTool, LLMTool from motleycrew.storage import MotleyGraphStore from motleycrew.common.utils import print_passthrough +from question_struct import Question + _default_prompt = PromptTemplate.from_template( """ You are a research agent who answers complex questions with clear, crisp and detailed answers. You are provided with a question and some research notes prepared by your team. - Question: {question} \n + Question: {question_text} \n Notes: {notes} \n Your task is to answer the question entirely based on the given notes. The notes contain a list of intermediate-questions and answers that may be helpful to you in writing an answer. @@ -39,10 +35,12 @@ class AnswerSubQuestionTool(MotleyTool): def __init__( self, graph: MotleyGraphStore, + answer_length: int, prompt: str | BasePromptTemplate = None, ): langchain_tool = create_answer_question_langchain_tool( graph=graph, + answer_length=answer_length, prompt=prompt, ) @@ -52,19 +50,23 @@ def __init__( class QuestionAnswererInput(BaseModel): """Data on the question to answer.""" - question_id: int = Field( - description="Id of the question node to process.", - ) - notes: str = Field( - description="The notes that contain the sub-questions and their answers.", + question: Question = Field( + description="Question node to process.", ) - question: str = Field( - description="The question to answer.", + + +def get_subquestions(graph: MotleyGraphStore, question: Question) -> list[Question]: + query = "MATCH (n1:{})-[]->(n2:{}) WHERE n1.id = $question_id and n2.context IS NOT NULL RETURN n2".format( + graph.node_table_name, graph.node_table_name ) + query_result = graph.run_cypher_query(query, parameters={"question_id": question.id}) + return [Question.deserialize(row[0]) for row in query_result] + def create_answer_question_langchain_tool( graph: MotleyGraphStore, + answer_length: int, prompt: str | BasePromptTemplate = None, ) -> Tool: """ @@ -74,7 +76,7 @@ def create_answer_question_langchain_tool( prompt = _default_prompt subquestion_answerer = LLMTool( - prompt=prompt, + prompt=prompt.partial(answer_length=str(answer_length)), name="Question answerer", description="Tool to answer a question from notes and sub-questions", ) @@ -86,52 +88,43 @@ def create_answer_question_langchain_tool( """ @chain - def retrieve_sub_question_answers(**kwargs) -> List[Tuple[str, str]]: - """ - Retrieves the answers to the sub-questions of a given question. 
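The rewritten chain below is built around RunnablePassthrough.assign, which computes new keys and merges them into the dict flowing through, leaving the existing keys intact. A self-contained sketch of the idiom (illustration only):

    from langchain_core.runnables import RunnablePassthrough

    pipeline = RunnablePassthrough.assign(question_text=lambda x: x["question"].upper())
    print(pipeline.invoke({"question": "why?"}))
    # -> {'question': 'why?', 'question_text': 'WHY?'}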
- """ - sub_questions = graph.get_sub_questions(kwargs["question_id"]) - out = [] - for sq in sub_questions: - if sq["answer"] is not None: - out.append((sq["question"], sq["answer"])) - return out - - @chain - def merge_notes(**kwargs) -> str: + def write_notes(input_dict: dict) -> str: """ Merges the notes and the sub-question answers. """ - notes = kwargs["notes"] - sub_question_answers = kwargs["sub_question_answers"] + question = input_dict["question"] + subquestions = get_subquestions(graph=graph, question=question) + + notes = "\n".join(question.context) notes += "\n\n" - for q, a in sub_question_answers: - notes += f"Q: {q}\nA: {a}\n\n" + for question in subquestions: + notes += f"Q: {question.question}\nA: {question.answer}\n\n" return notes @chain - def insert_answer(answer: str, question_id: int) -> None: + def insert_answer(input_dict: dict) -> None: """ Inserts the answer into the graph. """ - graph.update_properties(id=question_id, values={"answer": answer}) + question = input_dict["question"] + answer = input_dict["answer"].content + + graph.set_property( + entity_id=question.id, + property_name="answer", + property_value=answer, + ) this_chain = ( - { - "sub_question_answers": retrieve_sub_question_answers, - "input": RunnablePassthrough(), - } - | merge_notes - | { - "answer": subquestion_answerer.to_langchain_tool(), - "question_id": RunnablePassthrough(), - } + RunnablePassthrough.assign(question_text=lambda x: x["question"].question, notes=write_notes) + | RunnableLambda(print_passthrough) + | RunnablePassthrough.assign(answer=subquestion_answerer.to_langchain_tool()) | RunnableLambda(print_passthrough) | insert_answer ) langchain_tool = Tool.from_function( - func=this_chain.invoke, + func=lambda question: this_chain.invoke({"question": question}), name="Answer Sub-Question Tool", description="Answer a question based on the notes and sub-questions.", args_schema=QuestionAnswererInput, diff --git a/examples/research_agent/question_generator.py b/examples/research_agent/question_generator.py index 0ea1ecad..a3bb4573 100644 --- a/examples/research_agent/question_generator.py +++ b/examples/research_agent/question_generator.py @@ -34,8 +34,8 @@ New questions should be based only on the text and the goal Question and no other previous knowledge. You can ask up to {num_questions} new questions. -Return the questions as a json list of strings, don't return anything else -except this valid json list of strings. +Return the questions each on a new line and ending with a single question mark. +Don't return anything else except these questions. 
""" ) @@ -104,10 +104,18 @@ def partial_inserter(input_dict: dict): def insert_questions(input_dict) -> None: inserter = input_dict["question_inserter"]["question_inserter"][0] - questions = json.loads(input_dict["subquestions"].content) + questions_raw = input_dict["subquestions"].content + questions = [q.strip() for q in questions_raw.split("\n") if len(q.strip()) > 1] + inserter.invoke({"questions": questions}) - print("yay!") + def set_context(input_dict: dict): + context = input_dict["context"] + graph.set_property( + entity_id=input_dict["question"]["question"].id, + property_name="context", + property_value=json.dumps(input_dict["context"]), + ) # TODO: add context to question node pipeline = ( @@ -121,6 +129,7 @@ def insert_questions(input_dict) -> None: "subquestions": RunnablePassthrough.assign(question_text=lambda x: x["question"]["question"].question) | prompt.partial(num_questions=max_questions) | llm, + "context_setter": RunnableLambda(set_context), "question_inserter": RunnablePassthrough(), } | RunnableLambda(print_passthrough) diff --git a/examples/research_agent/question_orchestrator.py b/examples/research_agent/question_orchestrator.py index 8e629500..9e54b218 100644 --- a/examples/research_agent/question_orchestrator.py +++ b/examples/research_agent/question_orchestrator.py @@ -1,19 +1,12 @@ import logging -import sys -import kuzu - -from langchain.tools import Tool from motleycrew import MotleyTool from motleycrew.storage import MotleyGraphStore from question_struct import Question from question_generator import QuestionGeneratorTool -from question_generator import QuestionGeneratorToolInput from question_prioritizer import QuestionPrioritizerTool -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - class KnowledgeGainingOrchestrator: def __init__(self, storage: MotleyGraphStore, query_tool: MotleyTool): @@ -30,35 +23,23 @@ def get_unanswered_questions(self, only_without_children: bool = False) -> list[ else: query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(self.storage.node_table_name) - query_result = self.storage.run_query(query) + query_result = self.storage.run_cypher_query(query) return [Question.deserialize(row[0]) for row in query_result] def __call__(self, query: str, max_iter: int): - self.storage.create_entity({"question": query}) + question = Question(question=query) + self.storage.create_entity(question.serialize()) for iter_n in range(max_iter): - logging.info("====== Iteration %s of %s ======", iter_n, max_iter) + logging.info("====== Iteration %s of %s ======", iter_n + 1, max_iter) unanswered_questions = self.get_unanswered_questions(only_without_children=True) logging.info("Loaded unanswered questions: %s", unanswered_questions) - question_prioritization_tool_input = { - "unanswered_questions": "\n".join( - f"{i}. 
{question.question}" for i, question in enumerate(unanswered_questions) - ), - "original_question": query, - } - most_pertinent_question_raw = self.question_prioritization_tool.invoke( - question_prioritization_tool_input - ).content - logging.info("Most pertinent question according to the tool: %s", most_pertinent_question_raw) - - i, most_pertinent_question_text = most_pertinent_question_raw.split(".", 1) - i = int(i) - assert i < len(unanswered_questions) - - most_pertinent_question = unanswered_questions[i] - assert most_pertinent_question_text.strip() == most_pertinent_question.question.strip() + most_pertinent_question = self.question_prioritization_tool.invoke( + {"original_question": question, "unanswered_questions": unanswered_questions} + ) + logging.info("Most pertinent question according to the tool: %s", most_pertinent_question) logging.info("Generating new questions") self.question_generation_tool.invoke({"question": most_pertinent_question}) @@ -67,10 +48,17 @@ def __call__(self, query: str, max_iter: int): if __name__ == "__main__": from pathlib import Path import shutil + import os + import kuzu from dotenv import load_dotenv from motleycrew.storage import MotleyKuzuGraphStore + from motleycrew.common.utils import configure_logging + + from retriever_tool import make_retriever_tool load_dotenv() + configure_logging(verbose=True) + here = Path(__file__).parent db_path = here / "research_db" shutil.rmtree(db_path, ignore_errors=True) @@ -80,18 +68,11 @@ def __call__(self, query: str, max_iter: int): db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"} ) - query_tool = MotleyTool.from_langchain_tool( - Tool.from_function( - func=lambda question: [ - "Germany has consisted of many different states over the years", - "The capital of France has moved in 1815, from Lyons to Paris", - "France actually has two capitals, one in the north and one in the south", - ], - name="Query Tool", - description="Query the library for relevant information.", - args_schema=QuestionGeneratorToolInput, - ) - ) + DATA_DIR = os.path.join(here, "mahabharata/text/TinyTales") + + PERSIST_DIR = "./storage" + + query_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR, return_strings_only=True) orchestrator = KnowledgeGainingOrchestrator(storage=storage, query_tool=query_tool) - orchestrator(query="What is the capital of France?", max_iter=5) + orchestrator(query="Why did Arjuna kill Karna, his half-brother?", max_iter=5) diff --git a/examples/research_agent/question_prioritizer.py b/examples/research_agent/question_prioritizer.py index 661b42f0..b9dfb921 100644 --- a/examples/research_agent/question_prioritizer.py +++ b/examples/research_agent/question_prioritizer.py @@ -1,36 +1,110 @@ +from langchain_core.pydantic_v1 import BaseModel, Field from langchain.prompts import PromptTemplate +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.tools import StructuredTool +from langchain_core.runnables import ( + RunnablePassthrough, + RunnableLambda, + chain, +) +from motleycrew.tool import MotleyTool from motleycrew.tool import LLMTool +from motleycrew.common.utils import print_passthrough + +from question_struct import Question + + +class QuestionPrioritizerTool(MotleyTool): + def __init__( + self, + prompt: str | BasePromptTemplate = None, + ): + langchain_tool = create_question_prioritizer_langchain_tool(prompt=prompt) -PROMPT_TEMPLATE = PromptTemplate( + super().__init__(langchain_tool) + + +_default_prompt = PromptTemplate( template=( "You are 
provided with the following list of questions:" - " {unanswered_questions} \n" + " {unanswered_questions_text} \n" " Your task is to choose one question from the above list" " that is the most pertinent to the following query:\n" - " '{original_question}' \n" - " Respond with one question out of the provided list of questions." - " Return the questions as it is without any edits." - " Format your response like:\n" - " #. question" + " '{original_question_text}' \n" + " Respond with a single number the chosen question out of the provided list of questions." + " Return only the number as it is without any edits." ), input_variables=["unanswered_questions", "original_question"], ) -class QuestionPrioritizerTool(LLMTool): - def __init__(self): - super().__init__( - name="Question prioritizer", - description="Takes the original question and a list of derived questions, " - "and selects from the latter the one most pertinent to the former", - prompt=PROMPT_TEMPLATE, +class QuestionPrioritizerInput(BaseModel): + original_question: Question = Field(description="The original question.") + unanswered_questions: list[Question] = Field( + description="Questions to pick the most pertinent to the original question from.", + ) + + +def create_question_prioritizer_langchain_tool( + prompt: str | BasePromptTemplate = None, +) -> StructuredTool: + """ + Creates a LangChainTool for the AnswerSubQuestionTool. + """ + if prompt is None: + prompt = _default_prompt + + question_prioritizer = LLMTool( + prompt=prompt, + name="Question prioritizer", + description="Takes the original question and a list of derived questions, " + "and selects from the latter the one most pertinent to the former", + ) + + @chain + def get_original_question_text(input_dict: dict): + return input_dict["original_question"].question + + @chain + def format_unanswered_questions(input_dict: dict): + unanswered_questions: list[Question] = input_dict["unanswered_questions"] + return "\n".join("{}. {}".format(i + 1, question.question) for i, question in enumerate(unanswered_questions)) + + @chain + def get_most_pertinent_question(input_dict: dict): + unanswered_questions: list[Question] = input_dict["unanswered_questions"] + most_pertinent_question_id = int(input_dict["most_pertinent_question_id_message"].content.strip(" \n.")) - 1 + assert most_pertinent_question_id < len(unanswered_questions) + return unanswered_questions[most_pertinent_question_id] + + this_chain = ( + RunnablePassthrough.assign( + original_question_text=lambda x: x["original_question"].question, + unanswered_questions_text=format_unanswered_questions, ) + | RunnableLambda(print_passthrough) + | RunnablePassthrough.assign(most_pertinent_question_id_message=question_prioritizer.to_langchain_tool()) + | RunnableLambda(print_passthrough) + | get_most_pertinent_question + ) + + langchain_tool = StructuredTool.from_function( + func=lambda original_question, unanswered_questions: this_chain.invoke( + {"original_question": original_question, "unanswered_questions": unanswered_questions} + ), + name=question_prioritizer.name, + description=question_prioritizer.tool.description, + args_schema=QuestionPrioritizerInput, + ) + + return langchain_tool if __name__ == "__main__": - q = "What color is the sky?" 
-    unanswered = ["What time of day is it?", "Who was H.P.Lovecraft?"]
-    out = QuestionPrioritizerTool().invoke({"unanswered_questions": str(unanswered), "original_question": q})
+    q = Question(question="What color is the sky?")
+    unanswered = [Question(question="What time of day is it?"), Question(question="Who was H.P.Lovecraft?")]
+
+    out = QuestionPrioritizerTool().invoke({"unanswered_questions": unanswered, "original_question": q})
     print(out)
     print("yay!")
diff --git a/examples/research_agent/question_struct.py b/examples/research_agent/question_struct.py
index 5b92fd72..ed4e5268 100644
--- a/examples/research_agent/question_struct.py
+++ b/examples/research_agent/question_struct.py
@@ -2,6 +2,8 @@
 from dataclasses import dataclass
 import json

+REPR_CONTEXT_LENGTH_LIMIT = 30
+

 @dataclass
 class Question:
@@ -10,15 +12,29 @@ class Question:
     answer: Optional[str] = None
     context: Optional[list[str]] = None

+    def __repr__(self):
+        if self.context and len(self.context):
+            context_repr = '", "'.join(self.context)
+            if len(context_repr) > REPR_CONTEXT_LENGTH_LIMIT:
+                context_repr = '["' + context_repr[:REPR_CONTEXT_LENGTH_LIMIT] + '..."]'
+            else:
+                context_repr = '["' + context_repr + '"]'
+        else:
+            context_repr = str(self.context)
+
+        return "Question(id={}, question={}, answer={}, context={})".format(
+            self.id, self.question, self.answer, context_repr
+        )
+
     def serialize(self):
         data = {}
         if self.id:
-            data["id"] = json.dumps(self.id)
+            data["id"] = self.id
         if self.question:
-            data["question"] = json.dumps(self.question)
+            data["question"] = self.question
         if self.answer:
-            data["answer"] = json.dumps(self.answer)
+            data["answer"] = self.answer
         if self.context:
             data["context"] = json.dumps(self.context)
diff --git a/examples/research_agent/research_agent_main.py b/examples/research_agent/research_agent_main.py
index c72bdead..efd5ebd5 100644
--- a/examples/research_agent/research_agent_main.py
+++ b/examples/research_agent/research_agent_main.py
@@ -1,17 +1,52 @@
-import os.path
+from pathlib import Path
+import shutil
+import os
+import kuzu
+from dotenv import load_dotenv
+from motleycrew.storage import MotleyKuzuGraphStore
+from motleycrew.common.utils import configure_logging
+from question_orchestrator import KnowledgeGainingOrchestrator
+from answer_orchestrator import AnswerOrchestrator
+from retriever_tool import make_retriever_tool

-from examples.research_agent.retriever_tool import make_retriever_tool

-# check if storage already exists
-here = os.path.dirname(os.path.abspath(__file__))
-root = os.path.realpath(os.path.join(here, "../../.."))
-DATA_DIR = os.path.join(root, "mahabharata/text/TinyTales")
+WORKING_DIR = Path(__file__).parent
+DB_PATH = WORKING_DIR / "research_db"
+DATA_DIR = os.path.join(WORKING_DIR, "mahabharata/text/TinyTales")
+PERSIST_DIR = WORKING_DIR / "storage"

-PERSIST_DIR = "./storage"
+QUESTION = "Why did Arjuna kill Karna, his half-brother?"
+MAX_ITER = 2
+ANSWER_LENGTH = 200

-retriever_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR)
-response2 = retriever_tool.invoke(
-    {"question": "What are the most interesting facts about Arjuna?"}
-)
-print("yay!")
+
+def main():
+    load_dotenv()
+    configure_logging(verbose=True)
+
+    shutil.rmtree(DB_PATH, ignore_errors=True)
+
+    db = kuzu.Database(DB_PATH)
+    storage = MotleyKuzuGraphStore(
+        db, node_table_schema={"question": "STRING", "answer": "STRING", "context": "STRING"}
+    )
+
+    query_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR, return_strings_only=True)
+
+    question_orchestrator = KnowledgeGainingOrchestrator(storage=storage, query_tool=query_tool)
+    answer_orchestrator = AnswerOrchestrator(storage=storage, answer_length=ANSWER_LENGTH)
+
+    question_orchestrator(query=QUESTION, max_iter=MAX_ITER)
+    answered_question = answer_orchestrator()
+
+    print("Question: ", answered_question.question)
+    print("Answer: ", answered_question.answer)
+    print()
+    print("To explore the graph:")
+    print(f"docker run -p 8000:8000 -v {DB_PATH}:/database --rm kuzudb/explorer:latest")
+    print("MATCH (A)-[r]->(B) RETURN *;")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/research_agent/retriever_tool.py b/examples/research_agent/retriever_tool.py
index a61daffb..2e9d6638 100644
--- a/examples/research_agent/retriever_tool.py
+++ b/examples/research_agent/retriever_tool.py
@@ -3,9 +3,6 @@
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.tools import StructuredTool

-from llama_index.core.tools import RetrieverTool
-
-
 from llama_index.core.node_parser import SentenceSplitter
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -17,9 +14,10 @@
 )

 from motleycrew.tool import MotleyTool
+from question_struct import Question


-def make_retriever_tool(DATA_DIR, PERSIST_DIR):
+def make_retriever_tool(DATA_DIR, PERSIST_DIR, return_strings_only: bool = False):
     text_embedding_model = "text-embedding-ada-002"
     embeddings = OpenAIEmbedding(model=text_embedding_model)
@@ -44,12 +42,12 @@
     class RetrieverToolInput(BaseModel):
         """Input for the Retriever Tool."""

-        question: str = Field(
-            description="The input question for which to retrieve relevant data."
-        )
+        question: Question = Field(description="The input question for which to retrieve relevant data.")

-    def call_retriever(question: str) -> str:
-        out = retriever.retrieve(question)
+    def call_retriever(question: Question) -> list:
+        out = retriever.retrieve(question.question)
+        if return_strings_only:
+            return [node.text for node in out]
         return out

     retriever_tool = StructuredTool.from_function(
@@ -66,13 +64,12 @@

     # check if storage already exists
     here = os.path.dirname(os.path.abspath(__file__))
-    root = os.path.realpath(os.path.join(here, "../../.."))
-    DATA_DIR = os.path.join(root, "mahabharata/text/TinyTales")
+    DATA_DIR = os.path.join(here, "mahabharata/text/TinyTales")

     PERSIST_DIR = "./storage"

     retriever_tool = make_retriever_tool(DATA_DIR, PERSIST_DIR)
     response2 = retriever_tool.invoke(
-        {"question": "What are the most interesting facts about Arjuna?"}
+        {"question": Question(question="What are the most interesting facts about Arjuna?")}
     )
-    print("yay!")
+    print(response2)
diff --git a/motleycrew/storage/graph_store.py b/motleycrew/storage/graph_store.py
index f3904007..315cc24c 100644
--- a/motleycrew/storage/graph_store.py
+++ b/motleycrew/storage/graph_store.py
@@ -33,6 +33,6 @@ def set_property(self, entity_id: int, property_name: str, property_value: Any):
         pass

     @abstractmethod
-    def run_query(self, query: str, parameters: Optional[dict] = None) -> list[list]:
+    def run_cypher_query(self, query: str, parameters: Optional[dict] = None) -> list[list]:
         """Run a Cypher query and return the results in standard Python containers"""
         pass
diff --git a/motleycrew/storage/kuzu_graph_store.py b/motleycrew/storage/kuzu_graph_store.py
index 31ecbe44..8477ec3e 100644
--- a/motleycrew/storage/kuzu_graph_store.py
+++ b/motleycrew/storage/kuzu_graph_store.py
@@ -136,12 +136,12 @@ def set_property(self, entity_id: int, property_name: str, property_value: Any):
         query = """
         MATCH (n1:{})
         WHERE n1.id = $entity_id
-        SET n1.{} = $property_value;
+        SET n1.{} = $property_value RETURN n1;
         """
         prepared_statement = self.connection.prepare(query.format(self.node_table_name, property_name))
         self.connection.execute(prepared_statement, {"entity_id": entity_id, "property_value": property_value})

-    def run_query(self, query: str, parameters: Optional[dict] = None) -> list[list]:
+    def run_cypher_query(self, query: str, parameters: Optional[dict] = None) -> list[list]:
         """Run a Cypher query and return the results"""
         query_result = self.connection.execute(query=query, parameters=parameters)
         retval = []
@@ -193,12 +193,12 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "MotleyKuzuGraphStore":
     IS_SUBQUESTION_PREDICATE = "is_subquestion"

-    q1_id = graph_store.create_entity({"question": "q1"})
+    q1_id = graph_store.create_entity({"question": "q1"})["id"]
     assert graph_store.get_entity(q1_id)["question"] == "q1"

-    q2_id = graph_store.create_entity({"question": "q2"})
-    q3_id = graph_store.create_entity({"question": "q3"})
-    q4_id = graph_store.create_entity({"question": "q4"})
+    q2_id = graph_store.create_entity({"question": "q2"})["id"]
+    q3_id = graph_store.create_entity({"question": "q3"})["id"]
+    q4_id = graph_store.create_entity({"question": "q4"})["id"]
     graph_store.create_rel(q1_id, q2_id, IS_SUBQUESTION_PREDICATE)
     graph_store.create_rel(q1_id, q3_id, IS_SUBQUESTION_PREDICATE)
    graph_store.create_rel(q3_id, q4_id, IS_SUBQUESTION_PREDICATE)
From 1dc5f48dafbd6db9533bc5f07404d898ec72f101 Mon Sep 17 00:00:00 2001
From: whimo
Date: Sat, 27 Apr 2024 14:51:56 +0400
Subject: [PATCH 20/20] Increase MAX_ITER in research agent example

---
 examples/research_agent/research_agent_main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/research_agent/research_agent_main.py b/examples/research_agent/research_agent_main.py
index efd5ebd5..366a56bd 100644
--- a/examples/research_agent/research_agent_main.py
+++ b/examples/research_agent/research_agent_main.py
@@ -17,7 +17,7 @@
 PERSIST_DIR = WORKING_DIR / "storage"

 QUESTION = "Why did Arjuna kill Karna, his half-brother?"
-MAX_ITER = 2
+MAX_ITER = 20
 ANSWER_LENGTH = 200
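
Taken together, this series replaces the stub query tool with a real retriever over the
Mahabharata sample texts and moves the research agent example to Question objects end to
end. A minimal sketch of exercising the reworked question prioritizer on its own, assuming
the modules from this series are importable and an OpenAI API key is configured; the
candidate questions below are illustrative and not part of the patches:

    from question_struct import Question
    from question_prioritizer import QuestionPrioritizerTool

    # Illustrative questions, not taken from the patch series.
    original = Question(question="Why did Arjuna kill Karna, his half-brother?")
    candidates = [
        Question(question="Who were Karna's parents?"),
        Question(question="What happened at the battle of Kurukshetra?"),
    ]

    # The tool numbers the candidates, asks the LLM for the number of the most
    # pertinent one, and returns the corresponding Question object.
    prioritizer = QuestionPrioritizerTool()
    best = prioritizer.invoke(
        {"original_question": original, "unanswered_questions": candidates}
    )
    print(best)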