From 6f7188517c13b2359935b52dc5bb0ae78f51eabe Mon Sep 17 00:00:00 2001 From: whimo Date: Fri, 17 May 2024 16:58:29 +0400 Subject: [PATCH 1/5] Notebook example with AutoGen integration --- ...utoGen conversations with motleycrew.ipynb | 444 ++++++++++++++++++ 1 file changed, 444 insertions(+) create mode 100644 examples/Using AutoGen conversations with motleycrew.ipynb diff --git a/examples/Using AutoGen conversations with motleycrew.ipynb b/examples/Using AutoGen conversations with motleycrew.ipynb new file mode 100644 index 00000000..b55b18d8 --- /dev/null +++ b/examples/Using AutoGen conversations with motleycrew.ipynb @@ -0,0 +1,444 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "b30e4847-0dac-4fcb-a594-0adbf8688c65", + "metadata": {}, + "outputs": [], + "source": [ + "import autogen" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "6b3da0bc-d0f6-4f9c-8e69-0ad126f3a5ee", + "metadata": {}, + "outputs": [], + "source": [ + "import autogen\n", + "import os\n", + "\n", + "llm_config = {\n", + " \"config_list\": [{\"model\": \"gpt-4-turbo\", \"api_key\": os.environ[\"OPENAI_API_KEY\"]}],\n", + " \"cache_seed\": 2,\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d3f7738e", + "metadata": {}, + "outputs": [], + "source": [ + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"User_proxy\",\n", + " system_message=\"A human admin.\",\n", + " code_execution_config={\n", + " \"last_n_messages\": 2,\n", + " \"work_dir\": \"groupchat\",\n", + " \"use_docker\": False,\n", + " }, # Please set use_docker=True if docker is available to run the generated code. 
Using docker is safer than running the generated code directly.\n", + " human_input_mode=\"TERMINATE\",\n", + ")\n", + "coder = autogen.AssistantAgent(\n", + " name=\"Coder\",\n", + " llm_config=llm_config,\n", + ")\n", + "pm = autogen.AssistantAgent(\n", + " name=\"Product_manager\",\n", + " system_message=\"Creative in software product ideas.\",\n", + " llm_config=llm_config,\n", + ")\n", + "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n", + "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "37d610dc", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.tools import Tool\n", + "\n", + "def retrieve_knowledge_by_topic(topic: str):\n", + " chat_result = user_proxy.initiate_chat(\n", + " manager,\n", + " message=f\"Find a latest paper about {topic} on arxiv \"\n", + " \"and find its potential applications in software.\")\n", + "\n", + " for message in reversed(chat_result.chat_history):\n", + " if message.get(\"content\") and \"TERMINATE\" not in message[\"content\"]:\n", + " return message[\"content\"]\n", + "\n", + "\n", + "knowledge_retrieval_tool = Tool.from_function(\n", + " retrieve_knowledge_by_topic,\n", + " name=\"Retrieve Knowledge by Topic\",\n", + " description=\"Search arxiv for the latest paper on a given topic \"\n", + " \"and find its potential applications in software.\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "534453a5", + "metadata": {}, + "outputs": [], + "source": [ + "from motleycrew import MotleyCrew\n", + "from motleycrew.agents.langchain import ReactMotleyAgent\n", + "\n", + "crew = MotleyCrew()\n", + "writer = ReactMotleyAgent(tools=[knowledge_retrieval_tool])" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "59d9d90f", + "metadata": {}, + "outputs": [], + "source": [ + "from motleycrew.tasks import SimpleTaskRecipe\n", + "\n", 
+ "blog_post_task = SimpleTaskRecipe(\n", + " name=\"Produce blog post on latest advancements related to GPT-4\",\n", + " description=\"Using the insights provided by searching research papers, develop an engaging blog \"\n", + " \"post that highlights the most significant advancements on GPT-4 ant their applications.\\n\"\n", + " \"Your post should be informative yet accessible, catering to a tech-savvy audience.\\n\"\n", + " \"Make it sound cool, avoid complex words so it doesn't sound like AI. \"\n", + " \"Create a blog post of at least 4 paragraphs.\",\n", + " agent=writer,\n", + " )\n", + "crew.register_task_recipes([blog_post_task])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "cf0c1a96", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:Multithreading is not implemented yet, will run in single thread\n", + "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n", + "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n", + "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n", + "WARNING:root:Lunary public key is not set, tracking will be disabled\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "Find a latest paper about GPT-4 advancements and applications on arxiv and find its potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mCoder\u001b[0m (to chat_manager):\n", + "\n", + "To locate the most recent paper about GPT-4 advancements and applications on arXiv, we can use Python to programmatically access the arXiv API and search for papers related to \"GPT-4\". 
\n", + "\n", + "Here's a Python script you can run to fetch the title and abstract of the most recent paper from arXiv on GPT-4 advancements and applications:\n", + "\n", + "```python\n", + "# filename: fetch_latest_gpt4_paper.py\n", + "import requests\n", + "from urllib.parse import quote\n", + "\n", + "def fetch_recent_gpt4_papers():\n", + " base_url = \"http://export.arxiv.org/api/query?search_query=\"\n", + " query = \"all:GPT-4+AND+ti:advancements+AND+ti:applications\"\n", + "\n", + " # Encode the query for URL\n", + " query = quote(query)\n", + " url = f\"{base_url}{query}&sortBy=submittedDate&sortOrder=descending&max_results=1\"\n", + " response = requests.get(url)\n", + "\n", + " if response.status_code == 200:\n", + " import xml.etree.ElementTree as ET\n", + " root = ET.fromstring(response.content)\n", + "\n", + " for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n", + " title = entry.find('{http://www.w3.org/2005/Atom}title').text\n", + " abstract = entry.find('{http://www.w3.org/2005/Atom}summary').text\n", + " print(\"Title:\", title)\n", + " print(\"Abstract:\", abstract)\n", + " else:\n", + " print(\"Failed to query arXiv API. Status Code:\", response.status_code)\n", + "\n", + "if __name__ == \"__main__\":\n", + " fetch_recent_gpt4_papers()\n", + "```\n", + "\n", + "Please run this script in Python. 
After obtaining the paper's title and abstract, I will analyze it to determine potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 0 (execution succeeded)\n", + "Code output: \n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It seems like there was no direct output example provided from the script execution. If you're looking to analyze the latest advancements and potential applications of GPT-4 in software, we can explore possible scenarios based on typical advancements in such technologies:\n", + "\n", + "### Potential Applications of GPT-4 in Software\n", + "\n", + "1. **Natural Language Interfaces for Applications:**\n", + " - Develop advanced natural language processing interfaces that allow users to interact with software applications, databases, or computer systems using everyday language.\n", + "\n", + "2. **Enhanced Code Generation and Software Development Tools:**\n", + " - Use GPT-4 in tools like GitHub Copilot to improve code suggestions, making software development more efficient and accessible to non-experts.\n", + " - Automate more routine coding tasks, enabling developers to focus on complex problems and creative solutions.\n", + "\n", + "3. **Automated Customer Support:**\n", + " - Implement GPT-4 for generating context-aware responses in chatbots and virtual assistants, enhancing the user experience in customer support platforms with natural, helpful conversations.\n", + "\n", + "4. 
**Improved Content Generation:**\n", + " - Utilize GPT-4 for automatic content creation, such as articles, blogs, reports, and marketing material, saving time and resources while maintaining high quality and relevance.\n", + "\n", + "5. **Advanced Analysis and Summary Tools:**\n", + " - Develop software features that use GPT-4 to summarize emails, documents, meetings, and more, helping professionals quickly extract key information and action points.\n", + "\n", + "6. **Educational Technologies:**\n", + " - Integrate GPT-4 into educational platforms to provide personalized tutoring, generate practice questions, explain complex concepts, and engage students in interactive learning environments.\n", + "\n", + "7. **Sentiment Analysis and Market Research:**\n", + " - Utilize GPT-4 to analyze customer feedback across various channels, allowing companies to gain insights into public sentiment and market trends.\n", + "\n", + "8. **Interactive Gaming and Storytelling:**\n", + " - Employ GPT-4 in creating adaptive, narrative-driven gaming experiences where the plot and character interactions evolve based on players' decisions.\n", + "\n", + "By leveraging GPT-4's sophisticated language understanding and generation capabilities, these potential applications could significantly enhance efficiency, customization, and user experience across various software domains.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "Given the absence of specific paper details, these speculative applications provide a glimpse into how GPT-4 can be integrated across various software sectors to improve performance, user interaction, and automation. 
If you need any more specific implementations or have another query, feel free to ask!\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It seems there was no output from the script. If you are running this on your local machine, please ensure your script is correctly configured to hit the arXiv API and that it can successfully parse the XML response. This may involve checking your internet connection, the correctness of the API endpoint in your script, and whether your local Python environment has the necessary packages (`requests` and `xml.etree.ElementTree`) installed.\n", + "\n", + "If you received specific papers as a result of the script and want to discuss their applications or need help with any adjustments to the code or further commands, feel free to ask.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It appears there was no output from the script, which could indicate no recent papers precisely matching the query criteria (\"GPT-4\", \"advancements\", and \"applications\") were found on arXiv or there was an issue with the script execution. If you have specific areas of interest or need further adjustments in the search parameters, let me know so I can assist you further. Alternatively, if you want suggestions based on general knowledge of possible advancements and applications of GPT-4 in software, I'd be happy to discuss those!\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It appears there was no output from your script. 
This could be due to no recent papers specifically covering \"GPT-4 advancements and applications\" available on arXiv, or there could be an issue with the API request or script itself. \n", + "\n", + "If you want, you can modify the search query parameters in the script to broaden the search (e.g., removing some filters like \"advancements\" and \"applications\" and just searching for \"GPT-4\"). Alternatively, if you intended a response based on general knowledge about GPT-4's potential in software (without specific recent papers), I can provide insights into that based on existing information about generative models and their applications.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mCoder\u001b[0m (to chat_manager):\n", + "\n", + "It seems there is likely an issue with the specificity of the search query used in the script, or perhaps no such papers exactly matching the criteria have been recently published on arXiv. \n", + "\n", + "Given the challenge with the query, I suggest revising the script to use broader search terms or different keywords to increase the likelihood of finding relevant papers. 
Here is an adjusted version that searches more generally for \"GPT-4\" in the title to likely capture any relevant recent papers:\n", + "\n", + "```python\n", + "# filename: fetch_gpt4_paper_broaden_search.py\n", + "import requests\n", + "from urllib.parse import quote\n", + "\n", + "def fetch_recent_gpt4_papers():\n", + " base_url = \"http://export.arxiv.org/api/query?search_query=\"\n", + " query = \"ti:GPT-4\"\n", + "\n", + " # Encode the query for URL\n", + " query = quote(query)\n", + " url = f\"{base_url}{query}&sortBy=submittedDate&sortOrder=descending&max_results=1\"\n", + " response = requests.get(url)\n", + "\n", + " if response.status_code == 200:\n", + " import xml.etree.ElementTree as ET\n", + " root = ET.fromstring(response.content)\n", + "\n", + " for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n", + " title = entry.find('{http://www.w3.org/2005/Atom}title').text\n", + " abstract = entry.find('{http://www.w3.org/2005/Atom}summary').text\n", + " print(\"Title:\", title)\n", + " print(\"Abstract:\", abstract)\n", + " else:\n", + " print(\"Failed to query arXiv API. Status Code:\", response.status_code)\n", + "\n", + "if __name__ == \"__main__\":\n", + " fetch_recent_gpt4_papers()\n", + "```\n", + "\n", + "Please try running this updated version. 
This should broaden the criteria enough to obtain results from arXiv more effectively.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 0 (execution succeeded)\n", + "Code output: \n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It seems like there was still no output from the updated script. This could suggest a few possibilities: no papers exactly matching the term \"GPT-4\" in their title have been recently posted, there may be a technical issue with executing the script or fetching data from arXiv, or the connectivity with the arXiv API might be disrupted.\n", + "\n", + "Here's a checklist to troubleshoot:\n", + "\n", + "1. **API Accessibility**: Ensure there is no network issue or restriction blocking access to the arXiv API.\n", + "2. **Query Flexibility**: You might want to further simplify the query. For instance, the search could be expanded beyond just the title or could include related terms like \"deep learning\" or \"natural language processing\".\n", + "3. **Check Response**: Print out the entire response from the API to debug if it's an issue with parsing the XML or the API call itself.\n", + "4. 
**Alternate Methods**: If the script consistently fails to fetch data, consider manually searching on the arXiv website to ensure that the service is operational and there are papers available that meet your criteria.\n", + "\n", + "If troubleshooting doesn't resolve the issue, or if manual search is preferable at this point, please visit [the arXiv website](https://arxiv.org/) directly and use their search functionality to find papers on GPT-4 or related topics.\n", + "\n", + "Feel free to ask for further assistance or additional details on potential applications based on general knowledge of advancements in GPT technologies!\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It appears there was again no output from the script, perhaps due to the absence of returning relevant papers, issues with the network connectivity, API access, or other unforeseen errors. If you're running this script locally, please make sure your environment is properly set to reach external APIs and correctly parse their responses.\n", + "\n", + "Meanwhile, to continue with the initial objective of exploring potential applications of GPT-4 in software based on known capabilities, here's a theoretical glance:\n", + "\n", + "### Potential Applications of GPT-4 in Software\n", + "\n", + "1. **Advanced AI in Customer Service:**\n", + " GPT-4 could power more nuanced and contextually aware customer service chatbots, reducing the need for human intervention in routine queries and improving customer experience.\n", + "\n", + "2. **AI-Powered Code Completion and Review:**\n", + " Leveraging GPT-4 in integrated development environments (IDEs) could significantly enhance features like code completion, bug detection, and even provide on-the-fly code optimization suggestions.\n", + "\n", + "3. 
**Enhanced Content Creation Tools:**\n", + " Applications equipped with GPT-4 could assist content creators in generating written content, video scripts, or advertising material with greater linguistic fluency and creativity.\n", + "\n", + "4. **Dynamic E-Learning Platforms:**\n", + " E-learning systems can utilize GPT-4 to offer highly personalized learning experiences, adaptively generating learning content and assessments based on individual student performance and learning pace.\n", + "\n", + "5. **Automated Data Analysis and Reporting:**\n", + " Software tools equipped with GPT-4 could automate the analysis of large datasets and generate insightful, easy-to-understand reports, significantly speeding up the data analysis process.\n", + "\n", + "6. **Interactive Entertainment and Gaming:**\n", + " GPT-4 can redefine interactive story-driven games by dynamically generating dialogues and narrative paths based on player choices, creating unique gaming experiences for each user.\n", + "\n", + "7. **Simulations and Training:**\n", + " GPT-4 could play a pivotal role in simulations for training medical professionals, customer service agents, and more, providing realistic interactions and responses based on extensive training data.\n", + "\n", + "These applications are based on the capabilities typically associated with advanced language models like GPT-4. For real-world applications and more tailored insights, it's beneficial to have access to specific research or use cases as found in publications like those sought from arXiv. 
If you have other queries or need assistance on other topics, feel free to ask!\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n" + ] + }, + { + "data": { + "text/plain": [ + "[Task(status=done)]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "crew.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "0ae5789a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "**Exploring the Frontier of AI with GPT-4: A Leap into the Future**\n", + "\n", + "Welcome to the cutting-edge world of GPT-4, the latest iteration in the series of groundbreaking language models developed by OpenAI. As we dive into the capabilities and applications of this advanced tool, it's clear that GPT-4 is not just an incremental update but a transformative leap that is reshaping how we interact with technology.\n", + "\n", + "GPT-4 has taken the tech world by storm, primarily due to its enhanced understanding and generation of human-like text. This capability makes it an invaluable asset across various sectors. In customer service, for instance, GPT-4-powered chatbots are now more adept than ever at handling complex queries with a level of nuance and context awareness previously unattainable. This means smoother interactions for customers and less strain on human resources.\n", + "\n", + "The impact of GPT-4 extends into the realm of software development as well. Developers can rejoice as they integrate GPT-4 into their IDEs, where it assists not only in code completion but also in identifying potential bugs and optimizing code performance on the fly. 
This integration marks a significant step towards more efficient and less error-prone coding practices.\n", + "\n", + "For content creators, GPT-4 is nothing short of a revolution. Whether it's drafting blog posts, scripting videos, or creating engaging marketing content, GPT-4 helps streamline the creative process by generating initial drafts and suggesting improvements. This allows creators to focus more on refining their ideas and less on the mundane aspects of content generation.\n", + "\n", + "In the educational sector, GPT-4's ability to tailor content and assessments to individual learning styles and paces is transforming e-learning platforms. Students receive a more personalized learning experience, which can adapt in real-time to their educational needs, enhancing both engagement and retention rates.\n", + "\n", + "As we continue to explore and integrate GPT-4's capabilities, the potential applications seem almost limitless. From enhancing interactive entertainment and gaming to revolutionizing simulations and training for professionals, GPT-4 is setting the stage for a more interactive and responsive technological future. Stay tuned as we continue to uncover the full potential of this extraordinary AI advancement." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import display, Markdown\n", + "display(Markdown(blog_post_task.output))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a79da460", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 34899d5d6efd31b6ee76e4cb2c9f720f635dac37 Mon Sep 17 00:00:00 2001 From: whimo Date: Tue, 21 May 2024 01:28:00 +0400 Subject: [PATCH 2/5] Fixes related to AutoGen integration demo --- ...utoGen conversations with motleycrew.ipynb | 444 ------------------ examples/math_crewai.py | 6 +- motleycrew/tools/__init__.py | 6 +- motleycrew/tools/autogen_chat_tool.py | 73 +++ motleycrew/tools/python_repl.py | 20 +- motleycrew/tools/tool.py | 16 +- poetry.lock | 55 ++- tests/test_tools/test_repl_tool.py | 4 +- 8 files changed, 138 insertions(+), 486 deletions(-) delete mode 100644 examples/Using AutoGen conversations with motleycrew.ipynb create mode 100644 motleycrew/tools/autogen_chat_tool.py diff --git a/examples/Using AutoGen conversations with motleycrew.ipynb b/examples/Using AutoGen conversations with motleycrew.ipynb deleted file mode 100644 index b55b18d8..00000000 --- a/examples/Using AutoGen conversations with motleycrew.ipynb +++ /dev/null @@ -1,444 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "b30e4847-0dac-4fcb-a594-0adbf8688c65", - "metadata": {}, - "outputs": [], - "source": [ - "import autogen" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": 
"6b3da0bc-d0f6-4f9c-8e69-0ad126f3a5ee", - "metadata": {}, - "outputs": [], - "source": [ - "import autogen\n", - "import os\n", - "\n", - "llm_config = {\n", - " \"config_list\": [{\"model\": \"gpt-4-turbo\", \"api_key\": os.environ[\"OPENAI_API_KEY\"]}],\n", - " \"cache_seed\": 2,\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d3f7738e", - "metadata": {}, - "outputs": [], - "source": [ - "user_proxy = autogen.UserProxyAgent(\n", - " name=\"User_proxy\",\n", - " system_message=\"A human admin.\",\n", - " code_execution_config={\n", - " \"last_n_messages\": 2,\n", - " \"work_dir\": \"groupchat\",\n", - " \"use_docker\": False,\n", - " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n", - " human_input_mode=\"TERMINATE\",\n", - ")\n", - "coder = autogen.AssistantAgent(\n", - " name=\"Coder\",\n", - " llm_config=llm_config,\n", - ")\n", - "pm = autogen.AssistantAgent(\n", - " name=\"Product_manager\",\n", - " system_message=\"Creative in software product ideas.\",\n", - " llm_config=llm_config,\n", - ")\n", - "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n", - "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "37d610dc", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.tools import Tool\n", - "\n", - "def retrieve_knowledge_by_topic(topic: str):\n", - " chat_result = user_proxy.initiate_chat(\n", - " manager,\n", - " message=f\"Find a latest paper about {topic} on arxiv \"\n", - " \"and find its potential applications in software.\")\n", - "\n", - " for message in reversed(chat_result.chat_history):\n", - " if message.get(\"content\") and \"TERMINATE\" not in message[\"content\"]:\n", - " return message[\"content\"]\n", - "\n", - "\n", - "knowledge_retrieval_tool = 
Tool.from_function(\n", - " retrieve_knowledge_by_topic,\n", - " name=\"Retrieve Knowledge by Topic\",\n", - " description=\"Search arxiv for the latest paper on a given topic \"\n", - " \"and find its potential applications in software.\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "534453a5", - "metadata": {}, - "outputs": [], - "source": [ - "from motleycrew import MotleyCrew\n", - "from motleycrew.agents.langchain import ReactMotleyAgent\n", - "\n", - "crew = MotleyCrew()\n", - "writer = ReactMotleyAgent(tools=[knowledge_retrieval_tool])" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "59d9d90f", - "metadata": {}, - "outputs": [], - "source": [ - "from motleycrew.tasks import SimpleTaskRecipe\n", - "\n", - "blog_post_task = SimpleTaskRecipe(\n", - " name=\"Produce blog post on latest advancements related to GPT-4\",\n", - " description=\"Using the insights provided by searching research papers, develop an engaging blog \"\n", - " \"post that highlights the most significant advancements on GPT-4 ant their applications.\\n\"\n", - " \"Your post should be informative yet accessible, catering to a tech-savvy audience.\\n\"\n", - " \"Make it sound cool, avoid complex words so it doesn't sound like AI. 
\"\n", - " \"Create a blog post of at least 4 paragraphs.\",\n", - " agent=writer,\n", - " )\n", - "crew.register_task_recipes([blog_post_task])" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "cf0c1a96", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:root:Multithreading is not implemented yet, will run in single thread\n", - "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n", - "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n", - "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n", - "WARNING:root:Lunary public key is not set, tracking will be disabled\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", - "\n", - "Find a latest paper about GPT-4 advancements and applications on arxiv and find its potential applications in software.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mCoder\u001b[0m (to chat_manager):\n", - "\n", - "To locate the most recent paper about GPT-4 advancements and applications on arXiv, we can use Python to programmatically access the arXiv API and search for papers related to \"GPT-4\". 
\n", - "\n", - "Here's a Python script you can run to fetch the title and abstract of the most recent paper from arXiv on GPT-4 advancements and applications:\n", - "\n", - "```python\n", - "# filename: fetch_latest_gpt4_paper.py\n", - "import requests\n", - "from urllib.parse import quote\n", - "\n", - "def fetch_recent_gpt4_papers():\n", - " base_url = \"http://export.arxiv.org/api/query?search_query=\"\n", - " query = \"all:GPT-4+AND+ti:advancements+AND+ti:applications\"\n", - "\n", - " # Encode the query for URL\n", - " query = quote(query)\n", - " url = f\"{base_url}{query}&sortBy=submittedDate&sortOrder=descending&max_results=1\"\n", - " response = requests.get(url)\n", - "\n", - " if response.status_code == 200:\n", - " import xml.etree.ElementTree as ET\n", - " root = ET.fromstring(response.content)\n", - "\n", - " for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n", - " title = entry.find('{http://www.w3.org/2005/Atom}title').text\n", - " abstract = entry.find('{http://www.w3.org/2005/Atom}summary').text\n", - " print(\"Title:\", title)\n", - " print(\"Abstract:\", abstract)\n", - " else:\n", - " print(\"Failed to query arXiv API. Status Code:\", response.status_code)\n", - "\n", - "if __name__ == \"__main__\":\n", - " fetch_recent_gpt4_papers()\n", - "```\n", - "\n", - "Please run this script in Python. 
After obtaining the paper's title and abstract, I will analyze it to determine potential applications in software.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It seems like there was no direct output example provided from the script execution. If you're looking to analyze the latest advancements and potential applications of GPT-4 in software, we can explore possible scenarios based on typical advancements in such technologies:\n", - "\n", - "### Potential Applications of GPT-4 in Software\n", - "\n", - "1. **Natural Language Interfaces for Applications:**\n", - " - Develop advanced natural language processing interfaces that allow users to interact with software applications, databases, or computer systems using everyday language.\n", - "\n", - "2. **Enhanced Code Generation and Software Development Tools:**\n", - " - Use GPT-4 in tools like GitHub Copilot to improve code suggestions, making software development more efficient and accessible to non-experts.\n", - " - Automate more routine coding tasks, enabling developers to focus on complex problems and creative solutions.\n", - "\n", - "3. **Automated Customer Support:**\n", - " - Implement GPT-4 for generating context-aware responses in chatbots and virtual assistants, enhancing the user experience in customer support platforms with natural, helpful conversations.\n", - "\n", - "4. 
**Improved Content Generation:**\n", - " - Utilize GPT-4 for automatic content creation, such as articles, blogs, reports, and marketing material, saving time and resources while maintaining high quality and relevance.\n", - "\n", - "5. **Advanced Analysis and Summary Tools:**\n", - " - Develop software features that use GPT-4 to summarize emails, documents, meetings, and more, helping professionals quickly extract key information and action points.\n", - "\n", - "6. **Educational Technologies:**\n", - " - Integrate GPT-4 into educational platforms to provide personalized tutoring, generate practice questions, explain complex concepts, and engage students in interactive learning environments.\n", - "\n", - "7. **Sentiment Analysis and Market Research:**\n", - " - Utilize GPT-4 to analyze customer feedback across various channels, allowing companies to gain insights into public sentiment and market trends.\n", - "\n", - "8. **Interactive Gaming and Storytelling:**\n", - " - Employ GPT-4 in creating adaptive, narrative-driven gaming experiences where the plot and character interactions evolve based on players' decisions.\n", - "\n", - "By leveraging GPT-4's sophisticated language understanding and generation capabilities, these potential applications could significantly enhance efficiency, customization, and user experience across various software domains.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "Given the absence of specific paper details, these speculative applications provide a glimpse into how GPT-4 can be integrated across various software sectors to improve performance, user interaction, and automation. 
If you need any more specific implementations or have another query, feel free to ask!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It seems there was no output from the script. If you are running this on your local machine, please ensure your script is correctly configured to hit the arXiv API and that it can successfully parse the XML response. This may involve checking your internet connection, the correctness of the API endpoint in your script, and whether your local Python environment has the necessary packages (`requests` and `xml.etree.ElementTree`) installed.\n", - "\n", - "If you received specific papers as a result of the script and want to discuss their applications or need help with any adjustments to the code or further commands, feel free to ask.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It appears there was no output from the script, which could indicate no recent papers precisely matching the query criteria (\"GPT-4\", \"advancements\", and \"applications\") were found on arXiv or there was an issue with the script execution. If you have specific areas of interest or need further adjustments in the search parameters, let me know so I can assist you further. Alternatively, if you want suggestions based on general knowledge of possible advancements and applications of GPT-4 in software, I'd be happy to discuss those!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It appears there was no output from your script. 
This could be due to no recent papers specifically covering \"GPT-4 advancements and applications\" available on arXiv, or there could be an issue with the API request or script itself. \n", - "\n", - "If you want, you can modify the search query parameters in the script to broaden the search (e.g., removing some filters like \"advancements\" and \"applications\" and just searching for \"GPT-4\"). Alternatively, if you intended a response based on general knowledge about GPT-4's potential in software (without specific recent papers), I can provide insights into that based on existing information about generative models and their applications.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mCoder\u001b[0m (to chat_manager):\n", - "\n", - "It seems there is likely an issue with the specificity of the search query used in the script, or perhaps no such papers exactly matching the criteria have been recently published on arXiv. \n", - "\n", - "Given the challenge with the query, I suggest revising the script to use broader search terms or different keywords to increase the likelihood of finding relevant papers. 
Here is an adjusted version that searches more generally for \"GPT-4\" in the title to likely capture any relevant recent papers:\n", - "\n", - "```python\n", - "# filename: fetch_gpt4_paper_broaden_search.py\n", - "import requests\n", - "from urllib.parse import quote\n", - "\n", - "def fetch_recent_gpt4_papers():\n", - " base_url = \"http://export.arxiv.org/api/query?search_query=\"\n", - " query = \"ti:GPT-4\"\n", - "\n", - " # Encode the query for URL\n", - " query = quote(query)\n", - " url = f\"{base_url}{query}&sortBy=submittedDate&sortOrder=descending&max_results=1\"\n", - " response = requests.get(url)\n", - "\n", - " if response.status_code == 200:\n", - " import xml.etree.ElementTree as ET\n", - " root = ET.fromstring(response.content)\n", - "\n", - " for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n", - " title = entry.find('{http://www.w3.org/2005/Atom}title').text\n", - " abstract = entry.find('{http://www.w3.org/2005/Atom}summary').text\n", - " print(\"Title:\", title)\n", - " print(\"Abstract:\", abstract)\n", - " else:\n", - " print(\"Failed to query arXiv API. Status Code:\", response.status_code)\n", - "\n", - "if __name__ == \"__main__\":\n", - " fetch_recent_gpt4_papers()\n", - "```\n", - "\n", - "Please try running this updated version. 
This should broaden the criteria enough to obtain results from arXiv more effectively.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It seems like there was still no output from the updated script. This could suggest a few possibilities: no papers exactly matching the term \"GPT-4\" in their title have been recently posted, there may be a technical issue with executing the script or fetching data from arXiv, or the connectivity with the arXiv API might be disrupted.\n", - "\n", - "Here's a checklist to troubleshoot:\n", - "\n", - "1. **API Accessibility**: Ensure there is no network issue or restriction blocking access to the arXiv API.\n", - "2. **Query Flexibility**: You might want to further simplify the query. For instance, the search could be expanded beyond just the title or could include related terms like \"deep learning\" or \"natural language processing\".\n", - "3. **Check Response**: Print out the entire response from the API to debug if it's an issue with parsing the XML or the API call itself.\n", - "4. 
**Alternate Methods**: If the script consistently fails to fetch data, consider manually searching on the arXiv website to ensure that the service is operational and there are papers available that meet your criteria.\n", - "\n", - "If troubleshooting doesn't resolve the issue, or if manual search is preferable at this point, please visit [the arXiv website](https://arxiv.org/) directly and use their search functionality to find papers on GPT-4 or related topics.\n", - "\n", - "Feel free to ask for further assistance or additional details on potential applications based on general knowledge of advancements in GPT technologies!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", - "\n", - "It appears there was again no output from the script, perhaps due to the absence of returning relevant papers, issues with the network connectivity, API access, or other unforeseen errors. If you're running this script locally, please make sure your environment is properly set to reach external APIs and correctly parse their responses.\n", - "\n", - "Meanwhile, to continue with the initial objective of exploring potential applications of GPT-4 in software based on known capabilities, here's a theoretical glance:\n", - "\n", - "### Potential Applications of GPT-4 in Software\n", - "\n", - "1. **Advanced AI in Customer Service:**\n", - " GPT-4 could power more nuanced and contextually aware customer service chatbots, reducing the need for human intervention in routine queries and improving customer experience.\n", - "\n", - "2. **AI-Powered Code Completion and Review:**\n", - " Leveraging GPT-4 in integrated development environments (IDEs) could significantly enhance features like code completion, bug detection, and even provide on-the-fly code optimization suggestions.\n", - "\n", - "3. 
**Enhanced Content Creation Tools:**\n", - " Applications equipped with GPT-4 could assist content creators in generating written content, video scripts, or advertising material with greater linguistic fluency and creativity.\n", - "\n", - "4. **Dynamic E-Learning Platforms:**\n", - " E-learning systems can utilize GPT-4 to offer highly personalized learning experiences, adaptively generating learning content and assessments based on individual student performance and learning pace.\n", - "\n", - "5. **Automated Data Analysis and Reporting:**\n", - " Software tools equipped with GPT-4 could automate the analysis of large datasets and generate insightful, easy-to-understand reports, significantly speeding up the data analysis process.\n", - "\n", - "6. **Interactive Entertainment and Gaming:**\n", - " GPT-4 can redefine interactive story-driven games by dynamically generating dialogues and narrative paths based on player choices, creating unique gaming experiences for each user.\n", - "\n", - "7. **Simulations and Training:**\n", - " GPT-4 could play a pivotal role in simulations for training medical professionals, customer service agents, and more, providing realistic interactions and responses based on extensive training data.\n", - "\n", - "These applications are based on the capabilities typically associated with advanced language models like GPT-4. For real-world applications and more tailored insights, it's beneficial to have access to specific research or use cases as found in publications like those sought from arXiv. 
If you have other queries or need assistance on other topics, feel free to ask!\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n" - ] - }, - { - "data": { - "text/plain": [ - "[Task(status=done)]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "crew.run()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "0ae5789a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "**Exploring the Frontier of AI with GPT-4: A Leap into the Future**\n", - "\n", - "Welcome to the cutting-edge world of GPT-4, the latest iteration in the series of groundbreaking language models developed by OpenAI. As we dive into the capabilities and applications of this advanced tool, it's clear that GPT-4 is not just an incremental update but a transformative leap that is reshaping how we interact with technology.\n", - "\n", - "GPT-4 has taken the tech world by storm, primarily due to its enhanced understanding and generation of human-like text. This capability makes it an invaluable asset across various sectors. In customer service, for instance, GPT-4-powered chatbots are now more adept than ever at handling complex queries with a level of nuance and context awareness previously unattainable. This means smoother interactions for customers and less strain on human resources.\n", - "\n", - "The impact of GPT-4 extends into the realm of software development as well. Developers can rejoice as they integrate GPT-4 into their IDEs, where it assists not only in code completion but also in identifying potential bugs and optimizing code performance on the fly. 
This integration marks a significant step towards more efficient and less error-prone coding practices.\n", - "\n", - "For content creators, GPT-4 is nothing short of a revolution. Whether it's drafting blog posts, scripting videos, or creating engaging marketing content, GPT-4 helps streamline the creative process by generating initial drafts and suggesting improvements. This allows creators to focus more on refining their ideas and less on the mundane aspects of content generation.\n", - "\n", - "In the educational sector, GPT-4's ability to tailor content and assessments to individual learning styles and paces is transforming e-learning platforms. Students receive a more personalized learning experience, which can adapt in real-time to their educational needs, enhancing both engagement and retention rates.\n", - "\n", - "As we continue to explore and integrate GPT-4's capabilities, the potential applications seem almost limitless. From enhancing interactive entertainment and gaming to revolutionizing simulations and training for professionals, GPT-4 is setting the stage for a more interactive and responsive technological future. Stay tuned as we continue to uncover the full potential of this extraordinary AI advancement." 
- ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from IPython.display import display, Markdown\n", - "display(Markdown(blog_post_task.output))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a79da460", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/math_crewai.py b/examples/math_crewai.py index 2515b03e..58c31692 100644 --- a/examples/math_crewai.py +++ b/examples/math_crewai.py @@ -1,14 +1,14 @@ from dotenv import load_dotenv -from motleycrew import MotleyCrew, TaskRecipe +from motleycrew import MotleyCrew from motleycrew.agents.crewai import CrewAIMotleyAgent -from motleycrew.tools.python_repl import create_repl_tool +from motleycrew.tools import PythonREPLTool from motleycrew.common.utils import configure_logging def main(): """Main function of running the example.""" - repl_tool = create_repl_tool() + repl_tool = PythonREPLTool() # Define your agents with roles and goals solver1 = CrewAIMotleyAgent( diff --git a/motleycrew/tools/__init__.py b/motleycrew/tools/__init__.py index a9e7456e..ec02d850 100644 --- a/motleycrew/tools/__init__.py +++ b/motleycrew/tools/__init__.py @@ -1,3 +1,7 @@ from .tool import MotleyTool -from .llm_tool import LLMTool + +from .autogen_chat_tool import AutoGenChatTool from .image_generation import DallEImageGeneratorTool +from .llm_tool import LLMTool +from .mermaid_evaluator_tool import MermaidEvaluatorTool +from .python_repl import PythonREPLTool diff --git a/motleycrew/tools/autogen_chat_tool.py 
b/motleycrew/tools/autogen_chat_tool.py new file mode 100644 index 00000000..841bf47d --- /dev/null +++ b/motleycrew/tools/autogen_chat_tool.py @@ -0,0 +1,73 @@ +from typing import Optional, Type, Callable, Any + +from langchain_core.tools import StructuredTool +from langchain_core.prompts import PromptTemplate +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.pydantic_v1 import BaseModel, Field, create_model + +from autogen import ConversableAgent, ChatResult + +from motleycrew.tools import MotleyTool + + +def get_last_message(chat_result: ChatResult) -> str: + for message in reversed(chat_result.chat_history): + if message.get("content") and "TERMINATE" not in message["content"]: + return message["content"] + + +class AutoGenChatTool(MotleyTool): + def __init__( + self, + name: str, + description: str, + prompt: str | BasePromptTemplate, + initiator: ConversableAgent, + recipient: ConversableAgent, + result_extractor: Callable[[ChatResult], Any] = get_last_message, + input_schema: Optional[Type[BaseModel]] = None, + ): + langchain_tool = create_autogen_chat_tool( + name=name, + description=description, + prompt=prompt, + initiator=initiator, + recipient=recipient, + result_extractor=result_extractor, + input_schema=input_schema, + ) + super().__init__(langchain_tool) + + +def create_autogen_chat_tool( + name: str, + description: str, + prompt: str | BasePromptTemplate, + initiator: ConversableAgent, + recipient: ConversableAgent, + result_extractor: Callable[[ChatResult], Any], + input_schema: Optional[Type[BaseModel]] = None, +): + if not isinstance(prompt, BasePromptTemplate): + prompt = PromptTemplate.from_template(prompt) + + if input_schema is None: + fields = { + var: (str, Field(description=f"Input {var} for the tool.")) + for var in prompt.input_variables + } + + # Create the AutoGenChatToolInput class dynamically + input_schema = create_model("AutoGenChatToolInput", **fields) + + def run_autogen_chat(**kwargs) -> Any: + 
message = prompt.format(**kwargs) + chat_result = initiator.initiate_chat(recipient, message=message) + return result_extractor(chat_result) + + return StructuredTool.from_function( + func=run_autogen_chat, + name=name, + description=description, + args_schema=input_schema, + ) diff --git a/motleycrew/tools/python_repl.py b/motleycrew/tools/python_repl.py index caeae51b..55fffc8d 100644 --- a/motleycrew/tools/python_repl.py +++ b/motleycrew/tools/python_repl.py @@ -5,6 +5,12 @@ from .tool import MotleyTool +class PythonREPLTool(MotleyTool): + def __init__(self): + langchain_tool = create_repl_tool() + super().__init__(langchain_tool) + + class REPLToolInput(BaseModel): """Input for the REPL tool.""" @@ -13,12 +19,10 @@ class REPLToolInput(BaseModel): # You can create the tool to pass to an agent def create_repl_tool(): - return MotleyTool.from_langchain_tool( - Tool.from_function( - func=PythonREPL().run, - name="python_repl", - description="A Python shell. Use this to execute python commands. Input should be a valid python command. " - "MAKE SURE TO PRINT OUT THE RESULTS YOU CARE ABOUT USING `print(...)`.", - args_schema=REPLToolInput, - ) + return Tool.from_function( + func=PythonREPL().run, + name="python_repl", + description="A Python shell. Use this to execute python commands. Input should be a valid python command. 
" + "MAKE SURE TO PRINT OUT THE RESULTS YOU CARE ABOUT USING `print(...)`.", + args_schema=REPLToolInput, ) diff --git a/motleycrew/tools/tool.py b/motleycrew/tools/tool.py index 8ca34b12..dea4f226 100644 --- a/motleycrew/tools/tool.py +++ b/motleycrew/tools/tool.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Union, Annotated from langchain.tools import BaseTool @@ -26,7 +26,6 @@ class MotleyTool: """ def __init__(self, tool: BaseTool): - ensure_module_is_installed("llama_index") self.tool = tool @property @@ -73,3 +72,16 @@ def to_llama_index_tool(self) -> LlamaIndex__BaseTool: fn_schema=self.tool.args_schema, ) return llama_index_tool + + def to_autogen_tool(self): + fields = list(self.tool.args_schema.__fields__.values()) + if len(fields) != 1: + raise Exception("Multiple input fields are not supported in to_autogen_tool") + + field_name = fields[0].name + field_type = fields[0].annotation + + def autogen_tool_fn(input: field_type) -> str: + return self.invoke({field_name: input}) + + return autogen_tool_fn diff --git a/poetry.lock b/poetry.lock index 6853baba..542e1f9d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1231,13 +1231,13 @@ files = [ [[package]] name = "fsspec" -version = "2024.3.1" +version = "2024.5.0" description = "File-system specification" optional = true python-versions = ">=3.8" files = [ - {file = "fsspec-2024.3.1-py3-none-any.whl", hash = "sha256:918d18d41bf73f0e2b261824baeb1b124bcf771767e3a26425cd7dec3332f512"}, - {file = "fsspec-2024.3.1.tar.gz", hash = "sha256:f39780e282d7d117ffb42bb96992f8a90795e4d0fb0f661a70ca39fe9c43ded9"}, + {file = "fsspec-2024.5.0-py3-none-any.whl", hash = "sha256:e0fdbc446d67e182f49a70b82cf7889028a63588fde6b222521f10937b2b670c"}, + {file = "fsspec-2024.5.0.tar.gz", hash = "sha256:1d021b0b0f933e3b3029ed808eb400c08ba101ca2de4b3483fbc9ca23fcee94a"}, ] [package.extras] @@ -1245,7 +1245,7 @@ abfs = ["adlfs"] adl = ["adlfs"] arrow = ["pyarrow (>=1)"] dask = ["dask", "distributed"] -devel = ["pytest", 
"pytest-cov"] +dev = ["pre-commit", "ruff"] dropbox = ["dropbox", "dropboxdrivefs", "requests"] full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] fuse = ["fusepy"] @@ -1262,6 +1262,9 @@ s3 = ["s3fs"] sftp = ["paramiko"] smb = ["smbprotocol"] ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] [[package]] @@ -1960,20 +1963,20 @@ tiktoken = ">=0.5.2,<0.6.0" [[package]] name = "langchain-text-splitters" -version = "0.0.1" +version = "0.0.2" description = "LangChain text splitting utilities" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchain_text_splitters-0.0.1-py3-none-any.whl", hash = "sha256:f5b802f873f5ff6a8b9259ff34d53ed989666ef4e1582e6d1adb3b5520e3839a"}, - {file = "langchain_text_splitters-0.0.1.tar.gz", hash = "sha256:ac459fa98799f5117ad5425a9330b21961321e30bc19a2a2f9f761ddadd62aa1"}, + {file = "langchain_text_splitters-0.0.2-py3-none-any.whl", hash = 
"sha256:13887f32705862c1e1454213cb7834a63aae57c26fcd80346703a1d09c46168d"}, + {file = "langchain_text_splitters-0.0.2.tar.gz", hash = "sha256:ac8927dc0ba08eba702f6961c9ed7df7cead8de19a9f7101ab2b5ea34201b3c1"}, ] [package.dependencies] -langchain-core = ">=0.1.28,<0.2.0" +langchain-core = ">=0.1.28,<0.3" [package.extras] -extended-testing = ["lxml (>=5.1.0,<6.0.0)"] +extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] [[package]] name = "langchainhub" @@ -2010,13 +2013,13 @@ test = ["pytest", "pytest-cov"] [[package]] name = "langsmith" -version = "0.1.57" +version = "0.1.59" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.57-py3-none-any.whl", hash = "sha256:dbd83b0944a2fbea4151f0aa053530d93fcf6784a580621bc60633cb890b57dc"}, - {file = "langsmith-0.1.57.tar.gz", hash = "sha256:4682204de19f0218029c2b8445ce2cc3485c8d0df9796b31e2ce4c9051fce365"}, + {file = "langsmith-0.1.59-py3-none-any.whl", hash = "sha256:445e3bc1d3baa1e5340cd979907a19483b9763a2ed37b863a01113d406f69345"}, + {file = "langsmith-0.1.59.tar.gz", hash = "sha256:e748a89f4dd6aa441349143e49e546c03b5dfb43376a25bfef6a5ca792fe1437"}, ] [package.dependencies] @@ -2101,13 +2104,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0" [[package]] name = "llama-index-core" -version = "0.10.37" +version = "0.10.37.post1" description = "Interface between LLMs and your data" optional = true python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_core-0.10.37-py3-none-any.whl", hash = "sha256:1302ccbd267627199115cd68eee7f3a726611cc92b4a8e1a43dc679f67213664"}, - {file = "llama_index_core-0.10.37.tar.gz", hash = "sha256:b025ebda79b4e4c85269c96f0632b8f6badd1000ce458d7600b79a1de5a61a44"}, + {file = "llama_index_core-0.10.37.post1-py3-none-any.whl", hash = "sha256:e6b8a2dd4371e0326f57845a0b3d257ef4fa0d7d7de4e911fd45a5c814520606"}, + {file = 
"llama_index_core-0.10.37.post1.tar.gz", hash = "sha256:431305ecd7e8a524dc282f849ca4d7f7dccccd677cae55efefaf16b49d3d1aed"}, ] [package.dependencies] @@ -2332,13 +2335,13 @@ pydantic = ">=1.10" [[package]] name = "lunary" -version = "1.0.17" +version = "1.0.21" description = "Observability, analytics and evaluations for AI agents and chatbots." optional = false python-versions = "<4.0.0,>=3.8.1" files = [ - {file = "lunary-1.0.17-py3-none-any.whl", hash = "sha256:f2b43dfb7740ecbbe10950df318c286d7ad3c9e07d6501616becc904ddb69296"}, - {file = "lunary-1.0.17.tar.gz", hash = "sha256:fae8796449e4e3a8780e5d22eb33e58bde787e85fabbf5e8f64952f40a2c7b9e"}, + {file = "lunary-1.0.21-py3-none-any.whl", hash = "sha256:eb2275145eea78f3a2298464edd8ee9e1e5dc723e6ee2d916354ed0c01a20925"}, + {file = "lunary-1.0.21.tar.gz", hash = "sha256:c884b957a2054e88102b404e232fdff7472887c1dcfabd6d1e07689be0153d13"}, ] [package.dependencies] @@ -3782,13 +3785,13 @@ image = ["Pillow (>=8.0.0)"] [[package]] name = "pytest" -version = "8.2.0" +version = "8.2.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, - {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, + {file = "pytest-8.2.1-py3-none-any.whl", hash = "sha256:faccc5d332b8c3719f40283d0d44aa5cf101cec36f88cde9ed8f2bc0538612b1"}, + {file = "pytest-8.2.1.tar.gz", hash = "sha256:5046e5b46d8e4cac199c373041f26be56fdb81eb4e67dc11d4e10811fc3408fd"}, ] [package.dependencies] @@ -5397,18 +5400,18 @@ multidict = ">=4.0" [[package]] name = "zipp" -version = "3.18.1" +version = "3.18.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.18.1-py3-none-any.whl", hash = 
"sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, - {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, + {file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"}, + {file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] crewai = ["crewai"] diff --git a/tests/test_tools/test_repl_tool.py b/tests/test_tools/test_repl_tool.py index bbca7e25..3e7fc814 100644 --- a/tests/test_tools/test_repl_tool.py +++ b/tests/test_tools/test_repl_tool.py @@ -1,9 +1,9 @@ -from motleycrew.tools.python_repl import create_repl_tool +from motleycrew.tools import PythonREPLTool class TestREPLTool: def test_repl_tool(self): - repl_tool = create_repl_tool() + repl_tool = PythonREPLTool() repl_tool_input_fields = list(repl_tool.tool.args_schema.__fields__.keys()) assert repl_tool_input_fields == ["command"] From d3ff72c12f87d52992bac98939e324fa42a68056 Mon Sep 17 00:00:00 2001 From: whimo Date: Tue, 21 May 2024 01:28:50 +0400 Subject: [PATCH 3/5] Update AutoGen demo --- .../Using AutoGen chats with motleycrew.ipynb | 756 ++++++++++++++++++ 1 file changed, 756 insertions(+) create mode 100644 examples/Using AutoGen chats with motleycrew.ipynb diff --git a/examples/Using 
AutoGen chats with motleycrew.ipynb b/examples/Using AutoGen chats with motleycrew.ipynb new file mode 100644 index 00000000..5bef0617 --- /dev/null +++ b/examples/Using AutoGen chats with motleycrew.ipynb @@ -0,0 +1,756 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4a864294", + "metadata": {}, + "source": [ + "# Using AutoGen chats with motleycrew\n", + "\n", + "Microsoft AutoGen is one of the popular multi-agent frameworks. With motleycrew, you can easily integrate any AutoGen chat into your application." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d6602c72", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from dotenv import load_dotenv\n", + "load_dotenv()" + ] + }, + { + "cell_type": "markdown", + "id": "888adcb2", + "metadata": {}, + "source": [ + "Let's create an AutoGen chat for automatic problem solving.\n", + "The code is taken from the example here: https://microsoft.github.io/autogen/docs/notebooks/agentchat_groupchat" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "6b3da0bc-d0f6-4f9c-8e69-0ad126f3a5ee", + "metadata": {}, + "outputs": [], + "source": [ + "import autogen\n", + "import os\n", + "\n", + "llm_config = {\n", + " \"config_list\": [{\"model\": \"gpt-4-turbo\", \"api_key\": os.environ[\"OPENAI_API_KEY\"]}],\n", + " \"cache_seed\": None,\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d3f7738e", + "metadata": {}, + "outputs": [], + "source": [ + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"User_proxy\",\n", + " system_message=\"A human admin.\",\n", + " code_execution_config={\n", + " \"last_n_messages\": 2,\n", + " \"work_dir\": \"groupchat\",\n", + " \"use_docker\": False,\n", + " }, # Please set use_docker=True if docker is available to run the generated code. 
Using docker is safer than running the generated code directly.\n", + " human_input_mode=\"TERMINATE\",\n", + ")\n", + "coder = autogen.AssistantAgent(\n", + " name=\"Coder\",\n", + " llm_config=llm_config,\n", + ")\n", + "pm = autogen.AssistantAgent(\n", + " name=\"Product_manager\",\n", + " system_message=\"Creative in software product ideas.\",\n", + " llm_config=llm_config,\n", + ")\n", + "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n", + "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)" + ] + }, + { + "cell_type": "markdown", + "id": "c8480361", + "metadata": {}, + "source": [ + "If we were using plain AutoGen, we'd start the chat with something like this:\n", + "```\n", + "user_proxy.initiate_chat(\n", + " manager, message=\"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\"\n", + ")\n", + "```\n", + "\n", + "You see, the chat accepts an input and returns an output, just like a tool. \n", + "Because of that, for using the chat in motleycrew, we can utilize the built-in AutoGenChatTool. Its prompt can either be a template or a plain string. Here we are creating a tool that searches arXiv for recent papers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "37d610dc", + "metadata": {}, + "outputs": [], + "source": [ + "from motleycrew.tools import AutoGenChatTool\n", + "\n", + "knowledge_retrieval_tool = AutoGenChatTool(\n", + " name=\"retrieve_knowledge_by_topic\",\n", + " description=\"Search arxiv for the latest paper on a given topic \"\n", + " \"and find its potential applications in software.\", # will be used in the prompt of the future agent that will use the tool\n", + " prompt=\"Find a latest paper about {topic} on arxiv \"\n", + " \"and find its potential applications in software.\", # this is the initial prompt for the AutoGen chat itself\n", + " initiator=user_proxy,\n", + " recipient=manager,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "c580e262", + "metadata": {}, + "source": [ + "We can now give the tool to any agent and solve tasks with it." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "534453a5", + "metadata": {}, + "outputs": [], + "source": [ + "from motleycrew import MotleyCrew\n", + "from motleycrew.agents.langchain.openai_tools_react import ReactOpenAIToolsAgent\n", + "\n", + "crew = MotleyCrew()\n", + "writer = ReactOpenAIToolsAgent(tools=[knowledge_retrieval_tool])" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "59d9d90f", + "metadata": {}, + "outputs": [], + "source": [ + "from motleycrew.tasks import SimpleTaskRecipe\n", + "\n", + "blog_post_task = SimpleTaskRecipe(\n", + " name=\"Produce blog post on the applications of latest advancements related to GPT-4\",\n", + " description=\"Using the insights provided by searching research papers, develop an engaging blog \"\n", + " \"post that highlights the most significant advancements on GPT-4 and their applications.\\n\"\n", + " \"Your post should be informative yet accessible, catering to a tech-savvy audience.\\n\"\n", + " \"Make it sound cool, avoid complex words so it doesn't sound like AI. 
\"\n", + " \"Create a blog post of at least 4 paragraphs.\",\n", + " agent=writer,\n", + " )\n", + "crew.register_task_recipes([blog_post_task])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "cf0c1a96", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n", + "WARNING:root:No known Cypher type matching annotation typing.List[str], will use JSON string\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "Find a latest paper about GPT-4 on arxiv and find its potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO 
REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mCoder\u001b[0m (to chat_manager):\n", + "\n", + "First, we'll search the ArXiv repository for the latest papers on GPT-4. We can achieve this using the `arxiv` Python package, which allows querying and fetching papers from ArXiv. \n", + "\n", + "To begin, we need to search for the most recent papers related to \"GPT-4\". Once we find the papers, I will look into the one that is most recent and check its content to find mentions of potential applications in software.\n", + "\n", + "Let's start by finding the latest paper about GPT-4 on ArXiv:\n", + "\n", + "```python\n", + "# Install the arXiv package if needed\n", + "# pip install arxiv\n", + "\n", + "import arxiv\n", + "\n", + "# Query for the latest papers on GPT-4\n", + "def find_latest_gpt4_paper():\n", + " search = arxiv.Search(\n", + " query=\"GPT-4\",\n", + " max_results=1,\n", + " sort_by=arxiv.SortCriterion.SubmittedDate\n", + " )\n", + " \n", + " # Fetch the most recent paper\n", + " for result in search.results():\n", + " print(\"Title:\", result.title)\n", + " print(\"Authors:\", \", \".join(author.name for author in result.authors))\n", + " print(\"Abstract:\", result.summary.replace('\\n', ' '))\n", + " print(\"Published:\", result.published.date())\n", + " print(\"URL:\", result.entry_id)\n", + " break\n", + "\n", + "find_latest_gpt4_paper()\n", + "```\n", + "\n", + "Please run the above Python script. It will output the title, authors, abstract, publication date, and URL of the most recent paper about GPT-4 on ArXiv. 
We can then analyze the abstract to identify its potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 1 (execution failed)\n", + "Code output: \n", + "Traceback (most recent call last):\n", + " File \"\", line 4, in \n", + " import arxiv\n", + "ModuleNotFoundError: No module named 'arxiv'\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mCoder\u001b[0m (to chat_manager):\n", + "\n", + "It looks like the Python package `arxiv` is not installed, which caused the error. Instead of using the `arxiv` package, we can employ web scraping techniques to obtain the information from the ArXiv website. We'll use `requests` to fetch the page content and `BeautifulSoup` to parse it.\n", + "\n", + "Please ensure that you have the `requests` and `beautifulsoup4` libraries installed. 
If not, you can install them using pip.\n", + "\n", + "Here is the modified code to search for the latest GPT-4 paper on ArXiv using web scraping:\n", + "\n", + "```python\n", + "# filename: fetch_latest_gpt4_paper.py\n", + "\n", + "# Install the necessary packages if they are not installed\n", + "# pip install requests beautifulsoup4\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "\n", + "def fetch_latest_gpt4_paper():\n", + " # URL to search for GPT-4 related papers sorted by newest\n", + " url = \"https://arxiv.org/search/?query=GPT-4&searchtype=all&abstracts=show&order=-announced_date_first&size=1\"\n", + " \n", + " # Issue a GET request to fetch the page content\n", + " response = requests.get(url)\n", + " if response.status_code != 200:\n", + " print(\"Failed to fetch data from arXiv\")\n", + " return\n", + " \n", + " # Use BeautifulSoup to parse the HTML content\n", + " soup = BeautifulSoup(response.text, 'html.parser')\n", + " \n", + " # Locate the title, authors, abstract, published date, and link to the paper\n", + " paper = soup.select_one('li.arxiv-result')\n", + " title = paper.select_one('p.title').text.strip()\n", + " authors = paper.select_one('p.authors').text.replace('\\n', ' ').strip()\n", + " abstract = paper.select_one('span.abstract-full').text.strip()\n", + " published_date = paper.select_one('p.is-size-7').text.strip()\n", + " link = \"https://arxiv.org\" + paper.select_one('p.title').a['href']\n", + "\n", + " # Print the results\n", + " print(\"Title:\", title)\n", + " print(\"Authors:\", authors)\n", + " print(\"Abstract:\", abstract)\n", + " print(\"Published:\", published_date)\n", + " print(\"URL:\", link)\n", + "\n", + "fetch_latest_gpt4_paper()\n", + "```\n", + "\n", + "Please save this script to a file, install the required packages if needed, and run the script. It will print the details of the most recent paper on GPT-4 from ArXiv. 
We can then proceed to analyze the abstract for potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 0 (execution succeeded)\n", + "Code output: \n", + "Failed to fetch data from arXiv\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "It seems there was an issue fetching the data from the ArXiv website, possibly due to network issues or restrictions on scraping from their site. As an alternative approach, here are some methods you can consider to manually find the latest paper about GPT-4 on ArXiv:\n", + "\n", + "### Manual Search on ArXiv:\n", + "1. **Go to the ArXiv website**: Visit [arxiv.org](https://arxiv.org/).\n", + "2. **Use the Search Bar**: Type \"GPT-4\" into the search bar.\n", + "3. **Adjust Search Settings**: Ensure the search is set to sort by the most recent submissions first. You can adjust this in the search settings.\n", + "4. **Review Results**: Look through the titles and abstracts for the most recent entries to find relevant papers.\n", + "\n", + "### Google Scholar:\n", + "1. **Visit Google Scholar**: Go to [Google Scholar](https://scholar.google.com/).\n", + "2. **Search for \"GPT-4\"**: Ensure to add filters for recent years to get the most up-to-date research.\n", + "3. 
**Review and Filter Results**: Google Scholar often includes links to ArXiv preprints, helping you find the latest studies.\n", + "\n", + "### Using APIs:\n", + "If you are still interested in automating this process, using a more robust method such as the CrossRef API or the ArXiv API with proper handling of user-agent and headers might yield better results.\n", + "\n", + "If you can manually find a paper, you can share its abstract here, and we can analyze potential software applications together. Alternatively, if you have any other tasks or need further assistance, feel free to let me know!\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "Find a latest paper about GPT-4 on arxiv and find its potential applications in software.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to 
chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mCoder\u001b[0m (to chat_manager):\n", + "\n", + "First, let's write a script to search for the latest papers about GPT-4 on arXiv, a well-known repository for scientific papers. The script will use the arXiv API to fetch this information. 
Let's retrieve the details of the most recent paper related to GPT-4.\n", + "\n", + "```python\n", + "# filename: fetch_gpt4_papers.py\n", + "import urllib.request\n", + "import urllib.parse\n", + "import xml.etree.ElementTree as ET\n", + "\n", + "# Define the base URL for the ArXiv API\n", + "base_url = 'http://export.arxiv.org/api/query?'\n", + "\n", + "# Specify search parameters: search for GPT-4, max result is 1, sorted by the latest submissions\n", + "search_params = {\n", + " \"search_query\": \"all:GPT-4\",\n", + " \"sortBy\": \"submittedDate\",\n", + " \"sortOrder\": \"descending\",\n", + " \"max_results\": 1\n", + "}\n", + "\n", + "# Encode the search parameters, make the request, and read the response\n", + "query_string = urllib.parse.urlencode(search_params)\n", + "search_url = base_url + query_string\n", + "response = urllib.request.urlopen(search_url)\n", + "response_content = response.read()\n", + "\n", + "# Parse the XML response content\n", + "root = ET.fromstring(response_content)\n", + "\n", + "# Extract and print the title, summary, and published date from the entry\n", + "for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n", + " title = entry.find('{http://www.w3.org/2005/Atom}title').text\n", + " summary = entry.find('{http://www.w3.org/2005/Atom}summary').text\n", + " published_date = entry.find('{http://www.w3.org/2005/Atom}published').text\n", + "\n", + " # Output the results\n", + " print('Title:', title)\n", + " print('Published Date:', published_date)\n", + " print('Summary:', summary)\n", + "```\n", + "\n", + "**Instructions**: \n", + "1. Save the above script to a file named `fetch_gpt4_papers.py`.\n", + "2. Run the script using a Python interpreter. 
This will retrieve the latest paper about GPT-4 from arXiv.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 0 (execution succeeded)\n", + "Code output: \n", + "Title: Observational Scaling Laws and the Predictability of Language Model\n", + " Performance\n", + "Published Date: 2024-05-17T17:49:44Z\n", + "Summary: Understanding how language model performance varies with scale is critical to\n", + "benchmark and algorithm development. Scaling laws are one approach to building\n", + "this understanding, but the requirement of training models across many\n", + "different scales has limited their use. We propose an alternative,\n", + "observational approach that bypasses model training and instead builds scaling\n", + "laws from ~80 publically available models. Building a single scaling law from\n", + "multiple model families is challenging due to large variations in their\n", + "training compute efficiencies and capabilities. However, we show that these\n", + "variations are consistent with a simple, generalized scaling law where language\n", + "model performance is a function of a low-dimensional capability space, and\n", + "model families only vary in their efficiency in converting training compute to\n", + "capabilities. 
Using this approach, we show the surprising predictability of\n", + "complex scaling phenomena: we show that several emergent phenomena follow a\n", + "smooth, sigmoidal behavior and are predictable from small models; we show that\n", + "the agent performance of models such as GPT-4 can be precisely predicted from\n", + "simpler non-agentic benchmarks; and we show how to predict the impact of\n", + "post-training interventions like Chain-of-Thought and Self-Consistency as\n", + "language model capabilities continue to improve.\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n", + "\n", + "Based on the summary of the latest paper about GPT-4 titled \"Observational Scaling Laws and the Predictability of Language Model Performance,\" here are several potential applications in software development and related fields:\n", + "\n", + "1. **Predictive Benchmarking Tools**:\n", + " - The paper discusses the predictability of language model performance using scaling laws. This could lead to the development of new software tools that predict the performance of language models like GPT-4 based on various scaling inputs. Such tools would be valuable in optimizing model configurations for specific applications without needing extensive empirical testing.\n", + "\n", + "2. **Resource Optimization Algorithms**:\n", + " - The paper's framework for predicting performance as a function of a \"capability space\" can be translated into algorithms that optimize the training compute resources for developing AI models. This could significantly reduce costs and increase the efficiency of AI training cycles in commercial software development.\n", + "\n", + "3. **Automated Model Scaling**:\n", + " - Software that automatically scales AI models' capabilities based on available compute resources could be developed. 
This would make AI more accessible, especially for organizations with fluctuating resource availability, by providing them with tools to dynamically adjust their model's complexity and performance.\n", + "\n", + "4. **Advanced Development Frameworks**:\n", + " - Integrating the predictive scaling laws into AI development frameworks could enable developers to anticipate the behavior and limitations of their models, facilitating more accurate and robust AI systems design.\n", + "\n", + "5. **Enhanced Debugging and Analysis Tools**:\n", + " - Using the generalized scaling law proposed in the paper, new debugging and performance analysis tools can be created, which would help in identifying bottlenecks and inefficiencies in language model training and operation.\n", + "\n", + "6. **Optimization of Intervention Techniques**:\n", + " - The ability to predict the impact of post-training interventions like the Chain-of-Thought and Self-Consistency could be encapsulated in software solutions that optimize these interventions. 
This would improve the effectiveness and reliability of language models in practical applications.\n", + "\n", + "These applications demonstrate how theoretical insights from academic research can be used to inform and enhance software product development, particularly in the rapidly evolving field of artificial intelligence.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:No known Cypher type matching annotation typing.Optional[typing.Any], will use JSON string\n" + ] + }, + { + "data": { + "text/plain": [ + "[Task(status=done)]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "crew.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "0ae5789a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Final Answer: Welcome to the Future: Exploring the Frontiers of GPT-4\n", + "\n", + "In the rapidly evolving world of artificial intelligence, GPT-4 stands out as a beacon of innovation and potential. Recent research, particularly a fascinating study titled \"Observational Scaling Laws and the Predictability of Language Model Performance,\" sheds light on groundbreaking advancements that could redefine how we interact with technology.\n", + "\n", + "GPT-4 isn't just about understanding or generating text; it's about predicting and optimizing performance in ways previously deemed impossible. The study introduces predictive benchmarking tools that could revolutionize the development of AI models. Imagine software that can anticipate the performance of language models like GPT-4 based on various inputs, streamlining the development process and enhancing efficiency without the need for extensive testing.\n", + "\n", + "But the implications extend beyond mere prediction. 
The research discusses resource optimization algorithms that could significantly reduce the costs associated with AI training. These algorithms utilize a \"capability space\" to optimize the compute resources needed, ensuring that AI development is not only faster but also more economical.\n", + "\n", + "Moreover, GPT-4 could lead to the creation of automated model scaling software. Such tools would adjust AI capabilities based on available resources, making cutting-edge technology accessible to a broader range of users and organizations, regardless of their fluctuating resource availability.\n", + "\n", + "The integration of these advancements into AI development frameworks could also empower developers with better tools for debugging and performance analysis, enhancing the robustness and accuracy of AI systems. This is not just about making AI smarter; it's about making AI development smarter, more intuitive, and infinitely more creative.\n", + "\n", + "As we stand on the brink of these exciting developments, GPT-4 is not just a tool but a transformational force in the tech landscape, promising to bring about a new era of innovation and efficiency. Stay tuned, because the future of AI is here, and it's more promising than ever!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import display, Markdown\n", + "display(Markdown(blog_post_task.output))" + ] + }, + { + "cell_type": "markdown", + "id": "8e7c5eb1", + "metadata": {}, + "source": [ + "The writer agent used the AutoGen chat tool to retrieve data for the post!\n", + "\n", + "Now let's do it the other way around: give tools (or agents, for that matter) from motleycrew to AutoGen agents. 
The code is based on https://microsoft.github.io/autogen/docs/tutorial/tool-use" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "a79da460", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + ".autogen_tool_fn(input: str) -> str>" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from motleycrew.tools import MotleyTool\n", + "from langchain_community.tools import DuckDuckGoSearchRun\n", + "\n", + "search_tool = MotleyTool.from_supported_tool(DuckDuckGoSearchRun()) # Any tools or even motleycrew's agents can be converted to MotleyTool like this!\n", + "\n", + "# Let's first define the assistant agent that suggests tool calls.\n", + "assistant = autogen.ConversableAgent(\n", + " name=\"Assistant\",\n", + " system_message=\"You are a helpful AI assistant. \"\n", + " \"You can provide useful and up-to-date data using web search. \"\n", + " \"Return 'TERMINATE' when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# The user proxy agent is used for interacting with the assistant agent\n", + "# and executes tool calls.\n", + "user_proxy = autogen.ConversableAgent(\n", + " name=\"User\",\n", + " llm_config=False,\n", + " is_termination_msg=lambda msg: msg.get(\"content\") is not None and \"TERMINATE\" in msg[\"content\"],\n", + " human_input_mode=\"NEVER\",\n", + ")\n", + "\n", + "# Make an autogen-compatible tool from the MotleyTool.\n", + "autogen_tool = search_tool.to_autogen_tool()\n", + "\n", + "# Register the tool signature with the assistant agent.\n", + "assistant.register_for_llm(name=\"search_tool\", description=\"Web search tool\")(autogen_tool)\n", + "\n", + "# Register the tool function with the user proxy agent.\n", + "user_proxy.register_for_execution(name=\"search_tool\")(autogen_tool)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "6027c59f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "\u001b[33mUser\u001b[0m (to Assistant):\n", + "\n", + "What was the first computer?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mAssistant\u001b[0m (to User):\n", + "\n", + "\u001b[32m***** Suggested tool call (call_Na0NhkbZVtS7cnme7XRqxqUS): search_tool *****\u001b[0m\n", + "Arguments: \n", + "{\"input\":\"first computer\"}\n", + "\u001b[32m****************************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION search_tool...\u001b[0m\n", + "\u001b[33mUser\u001b[0m (to Assistant):\n", + "\n", + "\u001b[33mUser\u001b[0m (to Assistant):\n", + "\n", + "\u001b[32m***** Response from calling tool (call_Na0NhkbZVtS7cnme7XRqxqUS) *****\u001b[0m\n", + "ENIAC, the first programmable general-purpose electronic digital computer, built during World War II by the United States and completed in 1946. The project was led by John Mauchly, J. Presper Eckert, Jr., and their colleagues. ENIAC was the most powerful calculating device built to that time. Learn about the origins and evolution of computers, from the first mechanical and electric machines to the modern devices we use today. Explore the inventions, concepts, and events that shaped the computer industry and culture. Learn about the history of computing, from Charles Babbage's mechanical difference and analytical engines to the first electronic computer, the Atanasoff-Berry Computer. Discover how World War II and personal computers shaped the evolution of computing. The first commercially available personal computer was the Altair 8800, released in 1975. 
The Altair was a rudimentary device sold as a $439 kit that users had to assemble themselves, but it quickly gained a cult following among technology enthusiasts. Ada Lovelace, English mathematician, an associate of Charles Babbage, for whose digital computer prototype, the Analytical Engine, she created a program in 1843. She has been called the first computer programmer. Ada Lovelace Day, the second Tuesday in October, honors women's contributions to science and technology.\n", + "\u001b[32m**********************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mAssistant\u001b[0m (to User):\n", + "\n", + "The first programmable general-purpose electronic digital computer was the ENIAC, completed in 1946. It was built during World War II by the United States and led by John Mauchly, J. Presper Eckert, Jr., and their colleagues. ENIAC stood as the most powerful calculating device of its time.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mUser\u001b[0m (to Assistant):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mAssistant\u001b[0m (to User):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "chat_result = user_proxy.initiate_chat(assistant, message=\"What was the first computer?\")" + ] + }, + { + "cell_type": "markdown", + "id": "a62f444f", + "metadata": {}, + "source": [ + "The agent used the search tool!" 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 8d849f0faae535a1ea85477ab3a836bc784b9520 Mon Sep 17 00:00:00 2001 From: whimo Date: Tue, 21 May 2024 01:35:37 +0400 Subject: [PATCH 4/5] Add autogen optional dependency --- motleycrew/common/defaults.py | 10 +- motleycrew/common/exceptions.py | 4 +- motleycrew/tools/autogen_chat_tool.py | 8 +- poetry.lock | 352 +++++++++++++++++++++++++- pyproject.toml | 1 + 5 files changed, 366 insertions(+), 9 deletions(-) diff --git a/motleycrew/common/defaults.py b/motleycrew/common/defaults.py index 9e304587..3569d043 100644 --- a/motleycrew/common/defaults.py +++ b/motleycrew/common/defaults.py @@ -10,8 +10,8 @@ class Defaults: DEFAULT_GRAPH_STORE_TYPE = GraphStoreType.KUZU - -defaults_module_install_commands = { - "crewai": "pip install crewai", - "llama_index": "pip install llama-index" -} + MODULE_INSTALL_COMMANDS = { + "crewai": "pip install crewai", + "llama_index": "pip install llama-index", + "autogen": "pip install autogen", + } diff --git a/motleycrew/common/exceptions.py b/motleycrew/common/exceptions.py index ac52c478..8f2bea48 100644 --- a/motleycrew/common/exceptions.py +++ b/motleycrew/common/exceptions.py @@ -1,4 +1,4 @@ -from motleycrew.common.defaults import defaults_module_install_commands +from motleycrew.common import Defaults class LLMFamilyNotSupported(Exception): @@ -55,7 +55,7 @@ class ModuleNotInstalledException(Exception): def __init__(self, module_name: str, install_command: str = None): self.module_name = module_name - self.install_command = install_command or defaults_module_install_commands.get( + self.install_command = 
install_command or Defaults.MODULE_INSTALL_COMMANDS.get( module_name, None ) diff --git a/motleycrew/tools/autogen_chat_tool.py b/motleycrew/tools/autogen_chat_tool.py index 841bf47d..fd6065db 100644 --- a/motleycrew/tools/autogen_chat_tool.py +++ b/motleycrew/tools/autogen_chat_tool.py @@ -5,9 +5,14 @@ from langchain_core.prompts.base import BasePromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, create_model -from autogen import ConversableAgent, ChatResult +try: + from autogen import ConversableAgent, ChatResult +except ImportError: + ConversableAgent = None + ChatResult = None from motleycrew.tools import MotleyTool +from motleycrew.common.utils import ensure_module_is_installed def get_last_message(chat_result: ChatResult) -> str: @@ -27,6 +32,7 @@ def __init__( result_extractor: Callable[[ChatResult], Any] = get_last_message, input_schema: Optional[Type[BaseModel]] = None, ): + ensure_module_is_installed("autogen") langchain_tool = create_autogen_chat_tool( name=name, description=description, diff --git a/poetry.lock b/poetry.lock index 4a061915..db542523 100644 --- a/poetry.lock +++ b/poetry.lock @@ -213,6 +213,38 @@ tests = ["attrs[tests-no-zope]", "zope-interface"] tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +[[package]] +name = "autogen" +version = "1.0.16" +description = "Automate development tasks" +optional = true +python-versions = "*" +files = [ + {file = "autogen-1.0.16-py3-none-any.whl", hash = "sha256:caa66f6b5246f80c693e2d7a45b0c7e100ff3fbcb74cf2820af1c093e9d48f9c"}, +] + +[package.dependencies] +autopep8 = "*" +docopt = "*" +PyYAML = "*" +setuptools = "*" +twine = "*" + +[[package]] +name = "autopep8" +version = "2.1.0" +description = "A tool that automatically formats Python code to conform to the PEP 8 style guide" +optional = true +python-versions = ">=3.8" +files = [ + {file = 
"autopep8-2.1.0-py2.py3-none-any.whl", hash = "sha256:2bb76888c5edbcafe6aabab3c47ba534f5a2c2d245c2eddced4a30c4b4946357"}, + {file = "autopep8-2.1.0.tar.gz", hash = "sha256:1fa8964e4618929488f4ec36795c7ff12924a68b8bf01366c094fc52f770b6e7"}, +] + +[package.dependencies] +pycodestyle = ">=2.11.0" +tomli = {version = "*", markers = "python_version < \"3.11\""} + [[package]] name = "babel" version = "2.15.0" @@ -227,6 +259,21 @@ files = [ [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +[[package]] +name = "backports-tarfile" +version = "1.1.1" +description = "Backport of CPython tarfile module" +optional = true +python-versions = ">=3.8" +files = [ + {file = "backports.tarfile-1.1.1-py3-none-any.whl", hash = "sha256:73e0179647803d3726d82e76089d01d8549ceca9bace469953fcb4d97cf2d417"}, + {file = "backports_tarfile-1.1.1.tar.gz", hash = "sha256:9c2ef9696cb73374f7164e17fc761389393ca76777036f5aad42e8b93fcd8009"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.test", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"] + [[package]] name = "beautifulsoup4" version = "4.12.3" @@ -866,6 +913,60 @@ regex = ">=2023.12.25,<2024.0.0" [package.extras] tools = ["crewai-tools (>=0.0.12,<0.0.13)"] +[[package]] +name = "cryptography" +version = "42.0.7" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"}, + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"}, + {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"}, + {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"}, + {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = 
"sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"}, + {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"}, + {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"}, + {file = 
"cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"}, + {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + [[package]] name = "curl-cffi" version = "0.6.4" @@ -1043,6 +1144,16 @@ files = [ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] +[[package]] +name = "docopt" +version = "0.6.2" +description = "Pythonic argument parser, that will make you smile" +optional = true +python-versions = "*" +files = [ + {file = "docopt-0.6.2.tar.gz", hash = 
"sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, +] + [[package]] name = "docstring-parser" version = "0.15" @@ -1609,6 +1720,60 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] +[[package]] +name = "jaraco-classes" +version = "3.4.0" +description = "Utility functions for Python class constructs" +optional = true +python-versions = ">=3.8" +files = [ + {file = "jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790"}, + {file = "jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "jaraco-context" +version = "5.3.0" +description = "Useful decorators and context managers" +optional = true +python-versions = ">=3.8" +files = [ + {file = "jaraco.context-5.3.0-py3-none-any.whl", hash = "sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266"}, + {file = "jaraco.context-5.3.0.tar.gz", hash = "sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2"}, +] + +[package.dependencies] +"backports.tarfile" = {version = "*", markers = "python_version < \"3.12\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["portend", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "jaraco-functools" +version = "4.0.1" +description = "Functools like those found in stdlib" +optional = true +python-versions = ">=3.8" +files = [ + {file 
= "jaraco.functools-4.0.1-py3-none-any.whl", hash = "sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664"}, + {file = "jaraco_functools-4.0.1.tar.gz", hash = "sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.classes", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + [[package]] name = "jedi" version = "0.19.1" @@ -1628,6 +1793,21 @@ docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alab qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] +[[package]] +name = "jeepney" +version = "0.8.0" +description = "Low-level, pure Python DBus protocol wrapper." +optional = true +python-versions = ">=3.7" +files = [ + {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, + {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, +] + +[package.extras] +test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] +trio = ["async_generator", "trio"] + [[package]] name = "jinja2" version = "3.1.4" @@ -1799,6 +1979,31 @@ files = [ {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, ] +[[package]] +name = "keyring" +version = "25.2.1" +description = "Store and access your passwords safely." 
+optional = true +python-versions = ">=3.8" +files = [ + {file = "keyring-25.2.1-py3-none-any.whl", hash = "sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50"}, + {file = "keyring-25.2.1.tar.gz", hash = "sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} +"jaraco.classes" = "*" +"jaraco.context" = "*" +"jaraco.functools" = "*" +jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} +SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} + +[package.extras] +completion = ["shtab (>=1.1.0)"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + [[package]] name = "kuzu" version = "0.4.2" @@ -2618,6 +2823,17 @@ files = [ {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, ] +[[package]] +name = "more-itertools" +version = "10.2.0" +description = "More routines for operating on iterables, beyond itertools" +optional = true +python-versions = ">=3.8" +files = [ + {file = "more-itertools-10.2.0.tar.gz", hash = "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1"}, + {file = "more_itertools-10.2.0-py3-none-any.whl", hash = "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684"}, +] + [[package]] name = "multidict" version = "6.0.5" @@ -2960,6 +3176,31 @@ doc = ["myst-nb (>=1.0)", "numpydoc (>=1.7)", "pillow (>=9.4)", "pydata-sphinx-t extra = ["lxml (>=4.6)", "pydot (>=2.0)", "pygraphviz (>=1.12)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +[[package]] +name = 
"nh3" +version = "0.2.17" +description = "Python bindings to the ammonia HTML sanitization library." +optional = true +python-versions = "*" +files = [ + {file = "nh3-0.2.17-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9"}, + {file = "nh3-0.2.17-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f"}, + {file = "nh3-0.2.17-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b"}, + {file = "nh3-0.2.17-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a"}, + {file = "nh3-0.2.17-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062"}, + {file = "nh3-0.2.17-cp37-abi3-musllinux_1_2_i686.whl", hash = 
"sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71"}, + {file = "nh3-0.2.17-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10"}, + {file = "nh3-0.2.17-cp37-abi3-win32.whl", hash = "sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911"}, + {file = "nh3-0.2.17-cp37-abi3-win_amd64.whl", hash = "sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb"}, + {file = "nh3-0.2.17.tar.gz", hash = "sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028"}, +] + [[package]] name = "nltk" version = "3.8.1" @@ -3419,6 +3660,20 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa typing = ["typing-extensions"] xmp = ["defusedxml"] +[[package]] +name = "pkginfo" +version = "1.10.0" +description = "Query metadata from sdists / bdists / installed packages." +optional = true +python-versions = ">=3.6" +files = [ + {file = "pkginfo-1.10.0-py3-none-any.whl", hash = "sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097"}, + {file = "pkginfo-1.10.0.tar.gz", hash = "sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297"}, +] + +[package.extras] +testing = ["pytest", "pytest-cov", "wheel"] + [[package]] name = "platformdirs" version = "4.2.2" @@ -3885,6 +4140,17 @@ files = [ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, ] +[[package]] +name = "pywin32-ctypes" +version = "0.2.2" +description = "A (partial) reimplementation of pywin32 using ctypes/cffi" +optional = true +python-versions = ">=3.6" +files = [ + {file = "pywin32-ctypes-0.2.2.tar.gz", hash = "sha256:3426e063bdd5fd4df74a14fa3cf80a0b42845a87e1d1e81f6549f9daec593a60"}, + {file = "pywin32_ctypes-0.2.2-py3-none-any.whl", hash = "sha256:bf490a1a709baf35d688fe0ecf980ed4de11d2b3e37b51e5442587a75d9957e7"}, +] + [[package]] name = "pyyaml" version = 
"6.0.1" @@ -4045,6 +4311,25 @@ files = [ [package.dependencies] cffi = {version = "*", markers = "implementation_name == \"pypy\""} +[[package]] +name = "readme-renderer" +version = "43.0" +description = "readme_renderer is a library for rendering readme descriptions for Warehouse" +optional = true +python-versions = ">=3.8" +files = [ + {file = "readme_renderer-43.0-py3-none-any.whl", hash = "sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9"}, + {file = "readme_renderer-43.0.tar.gz", hash = "sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311"}, +] + +[package.dependencies] +docutils = ">=0.13.1" +nh3 = ">=0.2.14" +Pygments = ">=2.5.1" + +[package.extras] +md = ["cmarkgfm (>=0.8.0)"] + [[package]] name = "referencing" version = "0.35.1" @@ -4183,6 +4468,34 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +description = "A utility belt for advanced users of python-requests" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +name = "rfc3986" +version = "2.0.0" +description = "Validating URI References per RFC 3986" +optional = true +python-versions = ">=3.7" +files = [ + {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, + {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, +] + +[package.extras] +idna2008 = ["idna"] + [[package]] name = "rich" version = "13.7.1" @@ -4309,6 +4622,21 @@ files 
= [ {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, ] +[[package]] +name = "secretstorage" +version = "3.3.3" +description = "Python bindings to FreeDesktop.org Secret Service API" +optional = true +python-versions = ">=3.6" +files = [ + {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, + {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, +] + +[package.dependencies] +cryptography = ">=2.0" +jeepney = ">=0.6" + [[package]] name = "setuptools" version = "67.8.0" @@ -5069,6 +5397,28 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "twine" +version = "5.1.0" +description = "Collection of utilities for publishing packages on PyPI" +optional = true +python-versions = ">=3.8" +files = [ + {file = "twine-5.1.0-py3-none-any.whl", hash = "sha256:fe1d814395bfe50cfbe27783cb74efe93abeac3f66deaeb6c8390e4e92bacb43"}, + {file = "twine-5.1.0.tar.gz", hash = "sha256:4d74770c88c4fcaf8134d2a6a9d863e40f08255ff7d8e2acb3cbbd57d25f6e9d"}, +] + +[package.dependencies] +importlib-metadata = ">=3.6" +keyring = ">=15.1" +pkginfo = ">=1.8.1" +readme-renderer = ">=35.0" +requests = ">=2.20" +requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" +rfc3986 = ">=1.4.0" +rich = ">=12.0.0" +urllib3 = ">=1.26.0" + [[package]] name = "typer" version = "0.9.4" @@ -5420,4 +5770,4 @@ llama-index = ["llama-index"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<=3.13" -content-hash = "b3f4f7687af043fad43fa32a706e1782fd82bcc7cb57c304feeb77261c2d1d08" +content-hash = "3092e25b3f2c18a4f50ff9bdbb30bab75a9d0fd071722eee49f4c52c87afaa5c" diff --git a/pyproject.toml b/pyproject.toml index c8746736..c97d4207 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ pydantic = "^2.7.1" requests = "^2.31.0" curl-cffi = "^0.6.4" httpx = "^0.27.0" +autogen = {version = "^1.0.16", optional = true} [tool.poetry.group.dev.dependencies] black = "^24.2.0" From 7e803d4748f820bf61620c5c47140bda714bad90 Mon Sep 17 00:00:00 2001 From: whimo Date: Tue, 21 May 2024 01:38:38 +0400 Subject: [PATCH 5/5] update poetry.lock --- poetry.lock | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/poetry.lock b/poetry.lock index db542523..88daea06 100644 --- a/poetry.lock +++ b/poetry.lock @@ -123,13 +123,13 @@ files = [ [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [[package]] @@ -2218,13 +2218,13 @@ test = ["pytest", "pytest-cov"] [[package]] name = "langsmith" -version = "0.1.59" +version = "0.1.60" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.59-py3-none-any.whl", hash = "sha256:445e3bc1d3baa1e5340cd979907a19483b9763a2ed37b863a01113d406f69345"}, - {file = "langsmith-0.1.59.tar.gz", hash = "sha256:e748a89f4dd6aa441349143e49e546c03b5dfb43376a25bfef6a5ca792fe1437"}, + {file = "langsmith-0.1.60-py3-none-any.whl", hash = "sha256:3c3520ea473de0a984237b3e9d638fdf23ef3acc5aec89a42e693225e72d6120"}, + {file = "langsmith-0.1.60.tar.gz", hash = "sha256:6a145b5454437f9e0f81525f23c4dcdbb8c07b1c91553b8f697456c418d6a599"}, ] [package.dependencies] @@ -2540,13 +2540,13 @@ pydantic = ">=1.10" [[package]] name = "lunary" -version = "1.0.21" +version = "1.0.24" description = "Observability, analytics and evaluations for AI agents and chatbots." optional = false python-versions = "<4.0.0,>=3.8.1" files = [ - {file = "lunary-1.0.21-py3-none-any.whl", hash = "sha256:eb2275145eea78f3a2298464edd8ee9e1e5dc723e6ee2d916354ed0c01a20925"}, - {file = "lunary-1.0.21.tar.gz", hash = "sha256:c884b957a2054e88102b404e232fdff7472887c1dcfabd6d1e07689be0153d13"}, + {file = "lunary-1.0.24-py3-none-any.whl", hash = "sha256:7abdad14c84d24e5d83dc8f7ad1bc213d4e1083cc3de74a3577c242fc7ca59bb"}, + {file = "lunary-1.0.24.tar.gz", hash = "sha256:f9e9bb5a810bc2a955b943439243fa5dfa833a5e502b23e8a26556d3b1d95a0b"}, ] [package.dependencies] @@ -4449,13 +4449,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.0" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"}, + {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"}, ] [package.dependencies] @@ -5770,4 +5770,4 @@ llama-index = ["llama-index"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<=3.13" -content-hash = "3092e25b3f2c18a4f50ff9bdbb30bab75a9d0fd071722eee49f4c52c87afaa5c" +content-hash = "a7426b0bfd64bc3c27bcacdccec54fbc47c3d616d4509a813b3b7231480477c4"