From d3d80d0e5853debfeb436964550e5f336440ab6f Mon Sep 17 00:00:00 2001
From: jinno
Date: Sun, 5 May 2024 14:19:29 +0900
Subject: [PATCH] fix: add experimental function PlanAndExecute

---
 healthcheck.sh                              |  2 +-
 src/codeinterpreterapi/agents/agents.py     | 18 +++++--
 src/codeinterpreterapi/planners/__init__.py |  1 +
 src/codeinterpreterapi/planners/planners.py | 12 +++++
 src/codeinterpreterapi/session.py           | 51 ++++++++++++++++---
 .../supervisors/supervisors.py              | 15 ++++++
 src/codeinterpreterapi/tools/tools.py       |  1 -
 7 files changed, 87 insertions(+), 13 deletions(-)
 create mode 100644 src/codeinterpreterapi/planners/__init__.py
 create mode 100644 src/codeinterpreterapi/planners/planners.py
 create mode 100644 src/codeinterpreterapi/supervisors/supervisors.py

diff --git a/healthcheck.sh b/healthcheck.sh
index a28d5837..cf45378b 100755
--- a/healthcheck.sh
+++ b/healthcheck.sh
@@ -12,7 +12,7 @@ APP_START_PY="/app/server.py"
 if ! pgrep -f "python ${APP_START_PY}" >/dev/null; then
     log "${APP_START_PY} is not running. Starting it now..."
     # Start open_interpreter
-    if bash -c "eval $(pyenv init -) && python ${APP_START_PY} >> $LOG_FILE 2>&1" & then
+    if bash -c "eval $(pyenv init -) && python ${APP_START_PY} 2>&1 | tee $LOG_FILE" & then
         log "${APP_START_PY} started successfully."
         sleep 3600
         exit 0
diff --git a/src/codeinterpreterapi/agents/agents.py b/src/codeinterpreterapi/agents/agents.py
index 2c0232f6..74b658da 100644
--- a/src/codeinterpreterapi/agents/agents.py
+++ b/src/codeinterpreterapi/agents/agents.py
@@ -5,6 +5,7 @@
 from langchain.chat_models.base import BaseChatModel
 from langchain.memory.buffer import ConversationBufferMemory
 from langchain_core.prompts.chat import MessagesPlaceholder
+from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
 from langchain_openai import AzureChatOpenAI, ChatOpenAI

 from codeinterpreterapi.config import settings
@@ -12,7 +13,7 @@

 class CodeInterpreterAgent:
     @staticmethod
-    def choose_agent(
+    def choose_single_chat_agent(
         llm,
         tools,
     ) -> BaseSingleActionAgent:
@@ -42,12 +43,12 @@ def choose_agent(
     @staticmethod
     def create_agent_and_executor(llm, tools, verbose, chat_memory, callbacks) -> AgentExecutor:
         # agent
-        agent = CodeInterpreterAgent.choose_agent(llm, tools)
+        agent = CodeInterpreterAgent.choose_single_chat_agent(llm, tools)
         print("create_agent_and_executor agent=", str(type(agent)))
         # pprint.pprint(agent)

         # agent_executor
         agent_executor = AgentExecutor.from_agent_and_tools(
             agent=agent,
             max_iterations=settings.MAX_ITERATIONS,
             tools=tools,
@@ -64,3 +65,14 @@ def create_agent_and_executor(llm, tools, verbose, chat_memory, callbacks) -> Ag
             pprint.pprint(tool)

         return agent_executor
+
+    @staticmethod
+    def create_agent_and_executor_experimental(llm, tools, verbose) -> AgentExecutor:
+        # agent
+        agent = CodeInterpreterAgent.choose_single_chat_agent(llm, tools)
+        print("create_agent_and_executor agent=", str(type(agent)))
+
+        # agent_executor
+        agent_executor = load_agent_executor(llm, tools, verbose=verbose)
+
+        return agent_executor
diff --git a/src/codeinterpreterapi/planners/__init__.py b/src/codeinterpreterapi/planners/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/src/codeinterpreterapi/planners/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/codeinterpreterapi/planners/planners.py b/src/codeinterpreterapi/planners/planners.py
new file mode 100644
index 00000000..2ef625c8
--- /dev/null
+++ b/src/codeinterpreterapi/planners/planners.py
@@ -0,0 +1,12 @@
+from langchain.base_language import BaseLanguageModel
+from langchain_experimental.plan_and_execute import load_chat_planner
+from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
+
+
+class CodeInterpreterPlanner:
+    @staticmethod
+    def choose_planner(
+        llm: BaseLanguageModel,
+    ) -> LLMPlanner:
+        planner = load_chat_planner(llm)
+        return planner
diff --git a/src/codeinterpreterapi/session.py b/src/codeinterpreterapi/session.py
index 7a2eb7d2..d7b5611a 100644
--- a/src/codeinterpreterapi/session.py
+++ b/src/codeinterpreterapi/session.py
@@ -18,6 +18,7 @@
 from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.tools import BaseTool
+from langchain_experimental.plan_and_execute.planners.base import LLMPlanner

 from codeinterpreterapi.chains import (
     aget_file_modifications,
@@ -31,6 +32,8 @@

 from .agents.agents import CodeInterpreterAgent
 from .llm.llm import CodeInterpreterLlm
+from .planners.planners import CodeInterpreterPlanner
+from .supervisors.supervisors import CodeInterpreterSupervisor, MySupervisorChain
 from .tools.tools import CodeInterpreterTools

@@ -67,6 +70,8 @@ def __init__(
         self.callbacks = callbacks

         self.agent_executor: Optional[AgentExecutor] = None
+        self.llm_planner: Optional[LLMPlanner] = None
+        self.supervisor: Optional[MySupervisorChain] = None
         self.input_files: list[File] = []
         self.output_files: list[File] = []
         self.code_log: list[tuple[str, str]] = []
@@ -82,20 +87,40 @@ def from_id(cls, session_id: UUID, **kwargs: Any) -> "CodeInterpreterSession":
     def session_id(self) -> Optional[UUID]:
         return self.codebox.session_id

+    def initialize(self):
+        self.initialize_agent_executor()
+        self.initialize_llm_planner()
+        self.initialize_supervisor()
+
     def initialize_agent_executor(self):
-        agent_executor = CodeInterpreterAgent.create_agent_and_executor(
+        # self.agent_executor = CodeInterpreterAgent.create_agent_and_executor(
+        #     llm=self.llm,
+        #     tools=self.tools,
+        #     verbose=self.verbose,
+        #     chat_memory=self._history_backend(),
+        #     callbacks=self.callbacks,
+        # )
+        self.agent_executor = CodeInterpreterAgent.create_agent_and_executor_experimental(
             llm=self.llm,
             tools=self.tools,
             verbose=self.verbose,
-            chat_memory=self._history_backend(),
-            callbacks=self.callbacks,
         )
-        return agent_executor
+
+    def initialize_llm_planner(self):
+        self.llm_planner = CodeInterpreterPlanner.choose_planner(
+            llm=self.llm,
+        )
+
+    def initialize_supervisor(self):
+        self.supervisor = CodeInterpreterSupervisor.choose_supervisor(
+            planner=self.llm_planner,
+            executor=self.agent_executor,
+        )

     def start(self) -> SessionStatus:
         print("start")
         status = SessionStatus.from_codebox_status(self.codebox.start())
-        self.agent_executor = self.initialize_agent_executor()
+        self.initialize()
         self.codebox.run(
             f"!pip install -q {' '.join(settings.CUSTOM_PACKAGES)}",
         )
@@ -104,7 +129,7 @@
     async def astart(self) -> SessionStatus:
         print("astart")
         status = SessionStatus.from_codebox_status(await self.codebox.astart())
-        self.agent_executor = self.initialize_agent_executor()
+        self.initialize()
         await self.codebox.arun(
             f"!pip install -q {' '.join(settings.CUSTOM_PACKAGES)}",
         )
@@ -112,13 +137,14 @@

     def start_local(self) -> SessionStatus:
         print("start_local")
-        self.agent_executor = self.initialize_agent_executor()
+        self.initialize()
         status = SessionStatus(status="started")
         return status

     async def astart_local(self) -> SessionStatus:
         print("astart_local")
         status = self.start_local()
+        self.initialize()
         return status

     def _history_backend(self) -> BaseChatMessageHistory:
@@ -360,7 +386,12 @@ def generate_response(
             self._input_handler(user_request)
             assert self.agent_executor, "Session not initialized."
             print(user_request.content)
-            response = self.agent_executor.invoke(input=user_request.content)
+
+            # ======= ↓↓↓↓ LLM invoke ↓↓↓↓ #=======
+            # response = self.agent_executor.invoke(input=user_request.content)
+            response = self.supervisor.invoke(input=user_request.content)
+            # ======= ↑↑↑↑ LLM invoke ↑↑↑↑ #=======
+
             output = response["output"]
             print("agent_executor.invoke output=", output)
             return self._output_handler(output)
@@ -387,7 +418,11 @@ async def agenerate_response(
         try:
             await self._ainput_handler(user_request)
             assert self.agent_executor, "Session not initialized."
+
+            # ======= ↓↓↓↓ LLM invoke ↓↓↓↓ #=======
             response = await self.agent_executor.ainvoke(input=user_request.content)
+            # ======= ↑↑↑↑ LLM invoke ↑↑↑↑ #=======
+
             output = response["output"]
             print("agent_executor.invoke output=", output)
             return await self._aoutput_handler(output)
diff --git a/src/codeinterpreterapi/supervisors/supervisors.py b/src/codeinterpreterapi/supervisors/supervisors.py
new file mode 100644
index 00000000..a27d27b7
--- /dev/null
+++ b/src/codeinterpreterapi/supervisors/supervisors.py
@@ -0,0 +1,15 @@
+from langchain.agents import AgentExecutor
+from langchain.chains.base import Chain
+from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
+from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
+
+
+class MySupervisorChain(Chain):
+    pass
+
+
+class CodeInterpreterSupervisor:
+    @staticmethod
+    def choose_supervisor(planner: LLMPlanner, executor: AgentExecutor) -> MySupervisorChain:
+        supervisor = PlanAndExecute(planner=planner, executor=executor, verbose=True)
+        return supervisor
diff --git a/src/codeinterpreterapi/tools/tools.py b/src/codeinterpreterapi/tools/tools.py
index 5f4c69aa..901493f3 100644
--- a/src/codeinterpreterapi/tools/tools.py
+++ b/src/codeinterpreterapi/tools/tools.py
@@ -1,4 +1,3 @@
-from langchain.agents.tools import Tool
 from langchain_community.tools.shell.tool import ShellInput
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_core.tools import BaseTool, StructuredTool
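
Usage sketch (illustrative only): how the experimental PlanAndExecute path wired up
above is expected to be driven. It assumes the upstream codeinterpreterapi session
API (a default-constructible CodeInterpreterSession, generate_response() taking a
plain string, a response object with a .content field); those names come from the
existing library rather than from this diff and may differ in this fork.

    # Rough sketch, relying on the assumptions stated above.
    from codeinterpreterapi.session import CodeInterpreterSession

    session = CodeInterpreterSession()
    # start_local() now calls initialize(), which builds the agent executor,
    # the chat planner, and the PlanAndExecute supervisor in that order.
    session.start_local()

    # generate_response() now routes the request through supervisor.invoke(),
    # so the planner decomposes the task and the agent executor runs each step.
    response = session.generate_response("Plot a sine wave and save it as sine.png")
    print(response.content)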