From d64f866bfab62e5f167d20994f0c9a1ade534e72 Mon Sep 17 00:00:00 2001 From: Dino Hensen Date: Fri, 14 Apr 2023 18:28:58 +0200 Subject: [PATCH] Convert to python module named autogpt. Also fixed the Dockerfile. Converting to module makes development easier. Fixes coverage script in CI and test imports. --- .github/workflows/ci.yml | 4 +- .gitignore | 8 +- Dockerfile | 4 +- README.md | 32 ++- {scripts => autogpt}/__init__.py | 0 scripts/main.py => autogpt/__main__.py | 18 +- {scripts => autogpt}/agent_manager.py | 2 +- {scripts => autogpt}/ai_config.py | 0 {scripts => autogpt}/ai_functions.py | 4 +- {scripts => autogpt}/browse.py | 6 +- {scripts => autogpt}/call_ai_function.py | 5 +- {scripts => autogpt}/chat.py | 8 +- {scripts => autogpt}/commands.py | 20 +- {scripts => autogpt}/config.py | 0 {scripts => autogpt}/data_ingestion.py | 6 +- {scripts => autogpt}/execute_code.py | 0 {scripts => autogpt}/file_operations.py | 0 {scripts => autogpt}/image_gen.py | 2 +- {scripts => autogpt}/json_parser.py | 8 +- {scripts => autogpt}/json_utils.py | 2 +- {scripts => autogpt}/llm_utils.py | 2 +- {scripts => autogpt}/logger.py | 6 +- {scripts => autogpt}/memory/__init__.py | 4 +- {scripts => autogpt}/memory/base.py | 2 +- {scripts => autogpt}/memory/local.py | 2 +- {scripts => autogpt}/memory/no_memory.py | 0 {scripts => autogpt}/memory/pinecone.py | 1 + {scripts => autogpt}/memory/redismem.py | 4 +- {scripts => autogpt}/prompt.py | 0 {scripts => autogpt}/promptgenerator.py | 0 {scripts => autogpt}/speak.py | 2 +- {scripts => autogpt}/spinner.py | 0 {scripts => autogpt}/token_counter.py | 0 {scripts => autogpt}/utils.py | 0 docker-compose.yml | 2 +- main.py | 2 +- scripts/agent.py | 245 +++++++++++++++++++++++ tests.py | 4 +- tests/integration/memory_tests.py | 9 +- tests/local_cache_test.py | 5 +- tests/promptgenerator_tests.py | 4 +- tests/test_config.py | 2 +- tests/test_json_parser.py | 2 +- tests/unit/json_tests.py | 13 +- tests/unit/test_browse_scrape_text.py | 2 +- 45 
files changed, 352 insertions(+), 90 deletions(-) rename {scripts => autogpt}/__init__.py (100%) rename scripts/main.py => autogpt/__main__.py (98%) rename {scripts => autogpt}/agent_manager.py (97%) rename {scripts => autogpt}/ai_config.py (100%) rename {scripts => autogpt}/ai_functions.py (96%) rename {scripts => autogpt}/browse.py (97%) rename {scripts => autogpt}/call_ai_function.py (90%) rename {scripts => autogpt}/chat.py (97%) rename {scripts => autogpt}/commands.py (95%) rename {scripts => autogpt}/config.py (100%) rename {scripts => autogpt}/data_ingestion.py (95%) rename {scripts => autogpt}/execute_code.py (100%) rename {scripts => autogpt}/file_operations.py (100%) rename {scripts => autogpt}/image_gen.py (97%) rename {scripts => autogpt}/json_parser.py (95%) rename {scripts => autogpt}/json_utils.py (99%) rename {scripts => autogpt}/llm_utils.py (98%) rename {scripts => autogpt}/logger.py (98%) rename {scripts => autogpt}/memory/__init__.py (94%) rename {scripts => autogpt}/memory/base.py (93%) rename {scripts => autogpt}/memory/local.py (97%) rename {scripts => autogpt}/memory/no_memory.py (100%) rename {scripts => autogpt}/memory/pinecone.py (98%) rename {scripts => autogpt}/memory/redismem.py (97%) rename {scripts => autogpt}/prompt.py (100%) rename {scripts => autogpt}/promptgenerator.py (100%) rename {scripts => autogpt}/speak.py (99%) rename {scripts => autogpt}/spinner.py (100%) rename {scripts => autogpt}/token_counter.py (100%) rename {scripts => autogpt}/utils.py (100%) create mode 100644 scripts/agent.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b90b55d34fe..366aaf67d789 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,11 +32,11 @@ jobs: - name: Lint with flake8 continue-on-error: false - run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 + run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 - name: Run unittest tests with coverage run: | - 
coverage run --source=scripts -m unittest discover tests + coverage run --source=autogpt -m unittest discover tests - name: Generate coverage report run: | diff --git a/.gitignore b/.gitignore index b0be8967f295..5a2ce371caf4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ -scripts/keys.py -scripts/*json -scripts/node_modules/ -scripts/__pycache__/keys.cpython-310.pyc +autogpt/keys.py +autogpt/*json +autogpt/node_modules/ +autogpt/__pycache__/keys.cpython-310.pyc package-lock.json *.pyc auto_gpt_workspace/* diff --git a/Dockerfile b/Dockerfile index e776664e8483..3ae1ac1219dc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ COPY --chown=appuser:appuser requirements.txt . RUN pip install --no-cache-dir --user -r requirements.txt # Copy the application files -COPY --chown=appuser:appuser scripts/ . +COPY --chown=appuser:appuser autogpt/ . # Set the entrypoint -ENTRYPOINT ["python", "main.py"] \ No newline at end of file +ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/README.md b/README.md index 27150fa284ed..fcf0cc3f1d31 100644 --- a/README.md +++ b/README.md @@ -119,11 +119,11 @@ pip install -r requirements.txt ## 🔧 Usage -1. Run the `main.py` Python script in your terminal: +1. Run the `autogpt` Python module in your terminal: _(Type this into your CMD window)_ ``` -python scripts/main.py +python -m autogpt ``` 2. After each of action, enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter additional feedback for the AI. @@ -136,7 +136,21 @@ You will find activity and error logs in the folder `./output/logs` To output debug logs: ``` -python scripts/main.py --debug +python -m autogpt --debug +``` + +### Docker + +You can also build this into a docker image and run it: + +``` +docker build -t autogpt . 
+docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt +``` + +You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode: +``` +docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous ``` ### Command Line Arguments Here are some common arguments you can use when running Auto-GPT: @@ -152,7 +166,7 @@ Here are some common arguments you can use when running Auto-GPT: Use this to use TTS for Auto-GPT ``` -python scripts/main.py --speak +python -m autogpt --speak ``` ## 🔍 Google API Keys Configuration @@ -328,10 +342,10 @@ Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk. -1. Run the `main.py` Python script in your terminal: +1. Run the `autogpt` python module in your terminal: ``` -python scripts/main.py --continuous +python -m autogpt --speak --continuous ``` @@ -342,7 +356,7 @@ python scripts/main.py --continuous If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! ``` -python scripts/main.py --gpt3only +python -m autogpt --speak --gpt3only ``` It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. @@ -415,8 +429,8 @@ This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. 
We To run the linter, run the following command: ``` -flake8 scripts/ tests/ +flake8 autogpt/ tests/ # Or, if you want to run flake8 with the same configuration as the CI: -flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 +flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302 ``` diff --git a/scripts/__init__.py b/autogpt/__init__.py similarity index 100% rename from scripts/__init__.py rename to autogpt/__init__.py diff --git a/scripts/main.py b/autogpt/__main__.py similarity index 98% rename from scripts/main.py rename to autogpt/__main__.py index a12f9c7f5216..05cbed7adf1b 100644 --- a/scripts/main.py +++ b/autogpt/__main__.py @@ -1,16 +1,16 @@ import json import random -import commands as cmd -import utils -from memory import get_memory, get_supported_memory_backends -import chat +from autogpt import commands as cmd +from autogpt import utils +from autogpt.memory import get_memory, get_supported_memory_backends +from autogpt import chat from colorama import Fore, Style -from spinner import Spinner +from autogpt.spinner import Spinner import time -import speak -from config import Config -from json_parser import fix_and_parse_json -from ai_config import AIConfig +from autogpt import speak +from autogpt.config import Config +from autogpt.json_parser import fix_and_parse_json +from autogpt.ai_config import AIConfig import traceback import yaml import argparse diff --git a/scripts/agent_manager.py b/autogpt/agent_manager.py similarity index 97% rename from scripts/agent_manager.py rename to autogpt/agent_manager.py index 191ab838a342..2722b4bef0e7 100644 --- a/scripts/agent_manager.py +++ b/autogpt/agent_manager.py @@ -1,4 +1,4 @@ -from llm_utils import create_chat_completion +from autogpt.llm_utils import create_chat_completion next_key = 0 agents = {} # key, (task, full_message_history, model) diff --git a/scripts/ai_config.py b/autogpt/ai_config.py similarity index 100% rename from scripts/ai_config.py rename to autogpt/ai_config.py 
diff --git a/scripts/ai_functions.py b/autogpt/ai_functions.py similarity index 96% rename from scripts/ai_functions.py rename to autogpt/ai_functions.py index f4ee79cd7c14..b6e3df4818c2 100644 --- a/scripts/ai_functions.py +++ b/autogpt/ai_functions.py @@ -1,7 +1,7 @@ from typing import List import json -from config import Config -from call_ai_function import call_ai_function +from autogpt.config import Config +from autogpt.call_ai_function import call_ai_function cfg = Config() diff --git a/scripts/browse.py b/autogpt/browse.py similarity index 97% rename from scripts/browse.py rename to autogpt/browse.py index ef22de03d021..32e74fea6516 100644 --- a/scripts/browse.py +++ b/autogpt/browse.py @@ -1,8 +1,8 @@ import requests from bs4 import BeautifulSoup -from memory import get_memory -from config import Config -from llm_utils import create_chat_completion +from autogpt.memory import get_memory +from autogpt.config import Config +from autogpt.llm_utils import create_chat_completion from urllib.parse import urlparse, urljoin cfg = Config() diff --git a/scripts/call_ai_function.py b/autogpt/call_ai_function.py similarity index 90% rename from scripts/call_ai_function.py rename to autogpt/call_ai_function.py index 940eacfe0fbc..5bcd76af60ae 100644 --- a/scripts/call_ai_function.py +++ b/autogpt/call_ai_function.py @@ -1,8 +1,7 @@ -from config import Config - +from autogpt.config import Config cfg = Config() -from llm_utils import create_chat_completion +from autogpt.llm_utils import create_chat_completion # This is a magic function that can do anything with no-code. 
See diff --git a/scripts/chat.py b/autogpt/chat.py similarity index 97% rename from scripts/chat.py rename to autogpt/chat.py index 5392e4384032..6b901ffa1cdd 100644 --- a/scripts/chat.py +++ b/autogpt/chat.py @@ -1,10 +1,10 @@ import time import openai from dotenv import load_dotenv -from config import Config -import token_counter -from llm_utils import create_chat_completion -from logger import logger +from autogpt.config import Config +from autogpt import token_counter +from autogpt.llm_utils import create_chat_completion +from autogpt.logger import logger import logging cfg = Config() diff --git a/scripts/commands.py b/autogpt/commands.py similarity index 95% rename from scripts/commands.py rename to autogpt/commands.py index 43f5ae42363e..7bcdaa6c9e46 100644 --- a/scripts/commands.py +++ b/autogpt/commands.py @@ -1,15 +1,15 @@ -import browse +from autogpt import browse import json -from memory import get_memory +from autogpt.memory import get_memory import datetime -import agent_manager as agents -import speak -from config import Config -import ai_functions as ai -from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files -from execute_code import execute_python_file, execute_shell -from json_parser import fix_and_parse_json -from image_gen import generate_image +import autogpt.agent_manager as agents +from autogpt import speak +from autogpt.config import Config +import autogpt.ai_functions as ai +from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files +from autogpt.execute_code import execute_python_file, execute_shell +from autogpt.json_parser import fix_and_parse_json +from autogpt.image_gen import generate_image from duckduckgo_search import ddg from googleapiclient.discovery import build from googleapiclient.errors import HttpError diff --git a/scripts/config.py b/autogpt/config.py similarity index 100% rename from scripts/config.py rename to autogpt/config.py diff --git 
a/scripts/data_ingestion.py b/autogpt/data_ingestion.py similarity index 95% rename from scripts/data_ingestion.py rename to autogpt/data_ingestion.py index 9addc34bb274..f87532408d34 100644 --- a/scripts/data_ingestion.py +++ b/autogpt/data_ingestion.py @@ -1,8 +1,8 @@ import argparse import logging -from config import Config -from memory import get_memory -from file_operations import ingest_file, search_files +from autogpt.config import Config +from autogpt.memory import get_memory +from autogpt.file_operations import ingest_file, search_files cfg = Config() diff --git a/scripts/execute_code.py b/autogpt/execute_code.py similarity index 100% rename from scripts/execute_code.py rename to autogpt/execute_code.py diff --git a/scripts/file_operations.py b/autogpt/file_operations.py similarity index 100% rename from scripts/file_operations.py rename to autogpt/file_operations.py diff --git a/scripts/image_gen.py b/autogpt/image_gen.py similarity index 97% rename from scripts/image_gen.py rename to autogpt/image_gen.py index 6c27df3f352b..cc5112e32668 100644 --- a/scripts/image_gen.py +++ b/autogpt/image_gen.py @@ -2,7 +2,7 @@ import io import os.path from PIL import Image -from config import Config +from autogpt.config import Config import uuid import openai from base64 import b64decode diff --git a/scripts/json_parser.py b/autogpt/json_parser.py similarity index 95% rename from scripts/json_parser.py rename to autogpt/json_parser.py index 29995629ae21..36555d5ff666 100644 --- a/scripts/json_parser.py +++ b/autogpt/json_parser.py @@ -1,9 +1,9 @@ import json from typing import Any, Dict, Union -from call_ai_function import call_ai_function -from config import Config -from json_utils import correct_json -from logger import logger +from autogpt.call_ai_function import call_ai_function +from autogpt.config import Config +from autogpt.json_utils import correct_json +from autogpt.logger import logger cfg = Config() diff --git a/scripts/json_utils.py b/autogpt/json_utils.py 
similarity index 99% rename from scripts/json_utils.py rename to autogpt/json_utils.py index 80aab1928132..8493f09474cf 100644 --- a/scripts/json_utils.py +++ b/autogpt/json_utils.py @@ -1,6 +1,6 @@ import re import json -from config import Config +from autogpt.config import Config cfg = Config() diff --git a/scripts/llm_utils.py b/autogpt/llm_utils.py similarity index 98% rename from scripts/llm_utils.py rename to autogpt/llm_utils.py index 731acae269b6..24f47cc6864b 100644 --- a/scripts/llm_utils.py +++ b/autogpt/llm_utils.py @@ -1,7 +1,7 @@ import time import openai from colorama import Fore -from config import Config +from autogpt.config import Config cfg = Config() diff --git a/scripts/logger.py b/autogpt/logger.py similarity index 98% rename from scripts/logger.py rename to autogpt/logger.py index 4c7e588f209c..096d08916f80 100644 --- a/scripts/logger.py +++ b/autogpt/logger.py @@ -8,9 +8,9 @@ from colorama import Style -import speak -from config import Config -from config import Singleton +from autogpt import speak +from autogpt.config import Config +from autogpt.config import Singleton cfg = Config() diff --git a/scripts/memory/__init__.py b/autogpt/memory/__init__.py similarity index 94% rename from scripts/memory/__init__.py rename to autogpt/memory/__init__.py index 9b53d8d29abe..71f18efd37cb 100644 --- a/scripts/memory/__init__.py +++ b/autogpt/memory/__init__.py @@ -1,5 +1,5 @@ -from memory.local import LocalCache -from memory.no_memory import NoMemory +from autogpt.memory.local import LocalCache +from autogpt.memory.no_memory import NoMemory # List of supported memory backends # Add a backend to this list if the import attempt is successful diff --git a/scripts/memory/base.py b/autogpt/memory/base.py similarity index 93% rename from scripts/memory/base.py rename to autogpt/memory/base.py index 4dbf6791991a..6b1f083c289a 100644 --- a/scripts/memory/base.py +++ b/autogpt/memory/base.py @@ -1,6 +1,6 @@ """Base class for memory providers.""" import abc 
-from config import AbstractSingleton, Config +from autogpt.config import AbstractSingleton, Config import openai cfg = Config() diff --git a/scripts/memory/local.py b/autogpt/memory/local.py similarity index 97% rename from scripts/memory/local.py rename to autogpt/memory/local.py index b0afacf6c706..23f632df1b24 100644 --- a/scripts/memory/local.py +++ b/autogpt/memory/local.py @@ -3,7 +3,7 @@ from typing import Any, List, Optional import numpy as np import os -from memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding EMBED_DIM = 1536 diff --git a/scripts/memory/no_memory.py b/autogpt/memory/no_memory.py similarity index 100% rename from scripts/memory/no_memory.py rename to autogpt/memory/no_memory.py diff --git a/scripts/memory/pinecone.py b/autogpt/memory/pinecone.py similarity index 98% rename from scripts/memory/pinecone.py rename to autogpt/memory/pinecone.py index 20a905b32c00..1edfc2be2e72 100644 --- a/scripts/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -1,3 +1,4 @@ +from autogpt.config import Config, Singleton import pinecone diff --git a/scripts/memory/redismem.py b/autogpt/memory/redismem.py similarity index 97% rename from scripts/memory/redismem.py rename to autogpt/memory/redismem.py index 49045dd882f9..febfd3a8f150 100644 --- a/scripts/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -6,8 +6,8 @@ from redis.commands.search.indexDefinition import IndexDefinition, IndexType import numpy as np -from memory.base import MemoryProviderSingleton, get_ada_embedding -from logger import logger +from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding +from autogpt.logger import logger from colorama import Fore, Style diff --git a/scripts/prompt.py b/autogpt/prompt.py similarity index 100% rename from scripts/prompt.py rename to autogpt/prompt.py diff --git a/scripts/promptgenerator.py b/autogpt/promptgenerator.py similarity index 100% 
rename from scripts/promptgenerator.py rename to autogpt/promptgenerator.py diff --git a/scripts/speak.py b/autogpt/speak.py similarity index 99% rename from scripts/speak.py rename to autogpt/speak.py index 3afa591dd5f4..9fadaa0e526e 100644 --- a/scripts/speak.py +++ b/autogpt/speak.py @@ -1,7 +1,7 @@ import os from playsound import playsound import requests -from config import Config +from autogpt.config import Config cfg = Config() import gtts import threading diff --git a/scripts/spinner.py b/autogpt/spinner.py similarity index 100% rename from scripts/spinner.py rename to autogpt/spinner.py diff --git a/scripts/token_counter.py b/autogpt/token_counter.py similarity index 100% rename from scripts/token_counter.py rename to autogpt/token_counter.py diff --git a/scripts/utils.py b/autogpt/utils.py similarity index 100% rename from scripts/utils.py rename to autogpt/utils.py diff --git a/docker-compose.yml b/docker-compose.yml index af086f05f7e3..79f20bb52120 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,7 @@ services: - redis build: ./ volumes: - - "./scripts:/app" + - "./autogpt:/app" - ".env:/app/.env" profiles: ["exclude-from-up"] diff --git a/main.py b/main.py index 656c34ecb69e..160addc390b9 100644 --- a/main.py +++ b/main.py @@ -1 +1 @@ -from scripts.main import main +from autogpt import main diff --git a/scripts/agent.py b/scripts/agent.py new file mode 100644 index 000000000000..cbd6b3e7638d --- /dev/null +++ b/scripts/agent.py @@ -0,0 +1,245 @@ +import commands as cmd +import json +import traceback +from tkinter.ttk import Style + +from colorama import Fore + +import chat +from config import Config +from logger import logger +import speak +from spinner import Spinner + + +class Agent: + """Agent class for interacting with Auto-GPT. + + Attributes: + ai_name: The name of the agent. + memory: The memory object to use. + full_message_history: The full message history. + next_action_count: The number of actions to execute. 
+ prompt: The prompt to use. + user_input: The user input. + + """ + def __init__(self, + ai_name, + memory, + full_message_history, + next_action_count, + prompt, + user_input): + self.ai_name = ai_name + self.memory = memory + self.full_message_history = full_message_history + self.next_action_count = next_action_count + self.prompt = prompt + self.user_input = user_input + + def start_interaction_loop(self): + # Interaction Loop + cfg = Config() + loop_count = 0 + while True: + # Discontinue if continuous limit is reached + loop_count += 1 + if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit: + logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}") + break + + # Send message to AI, get response + with Spinner("Thinking... "): + assistant_reply = chat.chat_with_ai( + self.prompt, + self.user_input, + self.full_message_history, + self.memory, + cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument + + # Print Assistant thoughts + print_assistant_thoughts(assistant_reply) + + # Get command name and arguments + try: + command_name, arguments = cmd.get_command( + attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) + if cfg.speak_mode: + speak.say_text(f"I want to execute {command_name}") + except Exception as e: + logger.error("Error: \n", str(e)) + + if not cfg.continuous_mode and self.next_action_count == 0: + ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + self.user_input = "" + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...", + flush=True) + while True: + console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower().rstrip() == "y": + self.user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + self.next_action_count = abs(int(console_input.split(" ")[1])) + self.user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("Invalid input format. 
Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + self.user_input = "EXIT" + break + else: + self.user_input = console_input + command_name = "human_feedback" + break + + if self.user_input == "GENERATE NEXT COMMAND JSON": + logger.typewriter_log( + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") + elif self.user_input == "EXIT": + print("Exiting...", flush=True) + break + else: + # Print command + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + + # Execute command + if command_name is not None and command_name.lower().startswith("error"): + result = f"Command {command_name} threw the following error: " + arguments + elif command_name == "human_feedback": + result = f"Human feedback: {self.user_input}" + else: + result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + if self.next_action_count > 0: + self.next_action_count -= 1 + + memory_to_add = f"Assistant Reply: {assistant_reply} " \ + f"\nResult: {result} " \ + f"\nHuman Feedback: {self.user_input} " + + self.memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + self.full_message_history.append(chat.create_chat_message("system", result)) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) + else: + self.full_message_history.append( + chat.create_chat_message( + "system", "Unable to execute command")) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + + +def attempt_to_fix_json_by_finding_outermost_brackets(json_string): + cfg = Config() + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") + logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") + + try: + # Use regex to search for JSON objects + import regex + json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") + json_match = json_pattern.search(json_string) + + if json_match: + # Extract the valid JSON object from the string + json_string = json_match.group(0) + logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) + if cfg.speak_mode and cfg.debug_mode: + speak.say_text("Apparently json was fixed.") + else: + raise ValueError("No valid JSON object found") + + except (json.JSONDecodeError, ValueError) as e: + if cfg.speak_mode: + speak.say_text("Didn't work. I will have to ignore this response then.") + logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") + json_string = {} + + return json_string + + +def print_assistant_thoughts(assistant_reply): + """Prints the assistant's thoughts to the console""" + global ai_name + global cfg + cfg = Config() + try: + try: + # Parse and print Assistant response + assistant_reply_json = fix_and_parse_json(assistant_reply) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) + assistant_reply_json = fix_and_parse_json(assistant_reply_json) + + # Check if assistant_reply_json is a string and attempt to parse it into a JSON object + if isinstance(assistant_reply_json, str): + try: + assistant_reply_json = json.loads(assistant_reply_json) + except json.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) + + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + assistant_thoughts = 
assistant_reply_json.get("thoughts", {}) + assistant_thoughts_text = assistant_thoughts.get("text") + + if assistant_thoughts: + assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") + assistant_thoughts_plan = assistant_thoughts.get("plan") + assistant_thoughts_criticism = assistant_thoughts.get("criticism") + assistant_thoughts_speak = assistant_thoughts.get("speak") + + logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) + logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) + + if assistant_thoughts_plan: + logger.typewriter_log("PLAN:", Fore.YELLOW, "") + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split('\n') + for line in lines: + line = line.lstrip("- ") + logger.typewriter_log("- ", Fore.GREEN, line.strip()) + + logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) + # Speak the assistant's thoughts + if cfg.speak_mode and assistant_thoughts_speak: + speak.say_text(assistant_thoughts_speak) + + return assistant_reply_json + except json.decoder.JSONDecodeError as e: + logger.error("Error: Invalid JSON\n", assistant_reply) + if cfg.speak_mode: + speak.say_text("I have received an invalid JSON response from the OpenAI API. 
I cannot ignore this response.") + + # All other errors, return "Error: + error message" + except Exception as e: + call_stack = traceback.format_exc() + logger.error("Error: \n", call_stack) diff --git a/tests.py b/tests.py index 4dbfdd461ad2..487e00385237 100644 --- a/tests.py +++ b/tests.py @@ -1,8 +1,8 @@ import unittest if __name__ == "__main__": - # Load all tests from the 'scripts/tests' package - suite = unittest.defaultTestLoader.discover('scripts/tests') + # Load all tests from the 'autogpt/tests' package + suite = unittest.defaultTestLoader.discover('autogpt/tests') # Run the tests unittest.TextTestRunner().run(suite) diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py index d0c309628041..ea96c4c2c9c1 100644 --- a/tests/integration/memory_tests.py +++ b/tests/integration/memory_tests.py @@ -3,16 +3,15 @@ import string import sys from pathlib import Path -# Add the parent directory of the 'scripts' folder to the Python path -sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts')) -from config import Config -from memory.local import LocalCache +from autogpt.config import Config +from autogpt.memory.local import LocalCache class TestLocalCache(unittest.TestCase): def random_string(self, length): - return ''.join(random.choice(string.ascii_letters) for _ in range(length)) + return ''.join( + random.choice(string.ascii_letters) for _ in range(length)) def setUp(self): cfg = cfg = Config() diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index 0352624ea26b..601b11d86a2c 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -1,8 +1,7 @@ import os import sys -# Probably a better way: -sys.path.append(os.path.abspath('../scripts')) -from memory.local import LocalCache + +from autogpt.memory.local import LocalCache def MockConfig(): diff --git a/tests/promptgenerator_tests.py b/tests/promptgenerator_tests.py index 181fdea63f26..aac70b5e4377 100644 --- 
a/tests/promptgenerator_tests.py +++ b/tests/promptgenerator_tests.py @@ -3,9 +3,7 @@ import sys import os -# Add the path to the "scripts" directory to import the PromptGenerator module -sys.path.append(os.path.abspath("../scripts")) -from promptgenerator import PromptGenerator +from autogpt.promptgenerator import PromptGenerator # Create a test class for the PromptGenerator, subclassed from unittest.TestCase diff --git a/tests/test_config.py b/tests/test_config.py index ba8381e1e73a..af5fb2a83022 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,5 +1,5 @@ import unittest -from scripts.config import Config +from autogpt.config import Config class TestConfig(unittest.TestCase): diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index c403c73dbc19..c9d5e14b692f 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -1,7 +1,7 @@ import unittest import tests.context -from scripts.json_parser import fix_and_parse_json +from autogpt.json_parser import fix_and_parse_json class TestParseJson(unittest.TestCase): diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 4f3267217a36..77c8594007b2 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -1,9 +1,9 @@ import unittest import os import sys -# Probably a better way: -sys.path.append(os.path.abspath('../scripts')) -from json_parser import fix_and_parse_json + + +from autogpt.json_parser import fix_and_parse_json class TestParseJson(unittest.TestCase): @@ -108,6 +108,13 @@ def test_invalid_json_leading_sentence_with_gpt(self): # Assert that this raises an exception: self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) + def test_that_apologies_containing_multiple_json_get_the_correct_one(self): + bad_json = 'I apologize once again for the error. 
Here is the corrected format to run the tests: ``` { "name": "execute_python_file", "args": { "file": "" } } ``` Where `` should be replaced with the file path to the test file you created in the previous step. For example: ``` { "name": "execute_python_file", "args": { "file": "tests/test_addition.py" } } ``` This will execute the tests for the `add_numbers` function in `tests/test_addition.py`. Please let me know if you have any further questions.' + actual_json = fix_and_parse_json(bad_json, try_to_fix_with_gpt=True) + expected_json = { "name": "execute_python_file", "args": { "file": "tests/test_addition.py" } } + self.assertEqual(actual_json, expected_json) + # TODO come back to fix this test after fixing imports + if __name__ == '__main__': unittest.main() diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py index 9385cde71b07..f98e86285744 100644 --- a/tests/unit/test_browse_scrape_text.py +++ b/tests/unit/test_browse_scrape_text.py @@ -3,7 +3,7 @@ import requests -from scripts.browse import scrape_text +from autogpt.browse import scrape_text """ Code Analysis