
Commit

fix: apply ruff formatter
nobu007 committed May 20, 2024
1 parent 1dd03b7 commit d1e76a7
Showing 9 changed files with 27 additions and 31 deletions.
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
@@ -1,3 +1,4 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
  # Ruff version.
  rev: v0.4.4
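
For reference, the ruff pre-commit hook configured above typically boils down to two CLI calls. A minimal Python sketch of the equivalent manual run (assuming ruff v0.4.x is installed and the paths below exist in this repository; the real hook is driven by pre-commit, not this script):

import subprocess

def run_ruff(paths):
    # "ruff check --fix" applies lint autofixes; "ruff format" rewrites formatting in place.
    subprocess.run(["ruff", "check", "--fix", *paths], check=True)
    subprocess.run(["ruff", "format", *paths], check=True)

if __name__ == "__main__":
    run_ruff(["src", "examples", "server.py"])
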
2 changes: 1 addition & 1 deletion examples/frontend/app.py
@@ -2,9 +2,9 @@
import sys

import streamlit as st
from utils import get_images

from codeinterpreterapi import File
from codeinterpreterapi.config import settings

# Page configuration
st.set_page_config(layout="wide")
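
Side note on the pattern in this hunk: st.set_page_config must be the first Streamlit call in a script. A self-contained sketch of that pattern (illustrative only; it does not reproduce this app's UI or its get_images helper):

import streamlit as st

# set_page_config has to run before any other Streamlit command in the script.
st.set_page_config(layout="wide")

st.title("Code interpreter frontend (sketch)")
uploaded = st.file_uploader("Upload a file for the session")
if uploaded is not None:
    st.write(f"Received {uploaded.name} ({uploaded.size} bytes)")
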
9 changes: 1 addition & 8 deletions server.py
@@ -2,15 +2,8 @@
    ConnectorImplCodeinterpreterApi,
)
from gui_agent_loop_core.gui_agent_loop_core import GuiAgentLoopCore
-from gui_agent_loop_core.schema.schema import (
-    GuiAgentInterpreterChatMessage,
-    GuiAgentInterpreterChatMessages,
-    GuiAgentInterpreterChatResponse,
-    GuiAgentInterpreterChatResponseAny,
-)
-from langchain_core.messages import SystemMessage

-from codeinterpreterapi import CodeInterpreterSession, settings
+from codeinterpreterapi import CodeInterpreterSession


class CodeInterpreter(ConnectorImplCodeinterpreterApi):
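
server.py now pulls in only CodeInterpreterSession. A minimal sketch of how a session is driven in the upstream codeinterpreterapi README (method and attribute names are taken from upstream and may differ in this fork's connector):

from codeinterpreterapi import CodeInterpreterSession

with CodeInterpreterSession() as session:
    # generate_response sends one user message and returns the assistant reply;
    # the response also carries any files produced by the executed code.
    response = session.generate_response("Plot y = sin(x) and describe the result.")
    print(response.content)
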
@@ -1,7 +1,7 @@
from typing import List, Optional

from langchain.agents.agent import AgentExecutor, AgentOutputParser
-from langchain.agents.structured_chat.base import StructuredChatAgent, create_structured_chat_agent
+from langchain.agents.structured_chat.base import create_structured_chat_agent
from langchain.tools import BaseTool
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
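
This hunk keeps only the create_structured_chat_agent factory. A hedged sketch of the standard LangChain wiring for it (the hub prompt name and the ChatOpenAI model are assumptions, not this repository's configuration):

from langchain import hub
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
tools = []  # plug in real BaseTool instances here

# "hwchase17/structured-chat-agent" is the stock prompt published for this factory.
prompt = hub.pull("hwchase17/structured-chat-agent")
agent = create_structured_chat_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

print(executor.invoke({"input": "Say hello."}))
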
5 changes: 1 addition & 4 deletions src/codeinterpreterapi/agents/plan_and_execute/prompts.py
@@ -1,7 +1,4 @@
from textwrap import dedent

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_experimental.tot.prompts import JSONListOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

SYSTEM_MESSAGE_TEMPLATE = '''Respond to the human as helpfully and accurately as possible. You have access to the following tools:
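
The surviving imports here are ChatPromptTemplate and MessagesPlaceholder. A small sketch of how they combine into a chat prompt (the system string is a stand-in for this module's SYSTEM_MESSAGE_TEMPLATE):

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Respond to the human as helpfully and accurately as possible."),
        MessagesPlaceholder(variable_name="chat_history", optional=True),
        ("human", "{input}"),
    ]
)

# Renders to a list of chat messages; chat_history can be left out because the placeholder is optional.
print(prompt.format_messages(input="What tools do you have?"))
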
6 changes: 2 additions & 4 deletions src/codeinterpreterapi/planners/planners.py
@@ -1,9 +1,7 @@
from langchain import hub
-from langchain.agents import AgentExecutor, create_react_agent
+from langchain.agents import create_react_agent
from langchain.base_language import BaseLanguageModel
-from langchain_core.runnables import Runnable, RunnablePassthrough
-from langchain_experimental.plan_and_execute import load_chat_planner
-from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
+from langchain_core.runnables import Runnable

SYSTEM_PROMPT_PLANNER = """
Let's first understand the problem and devise a plan to solve the problem.
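
planners.py now relies on create_react_agent alone. A hedged sketch of the usual ReAct wiring (the hub prompt and model are illustrative defaults, not the SYSTEM_PROMPT_PLANNER defined in this file):

from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
tools = []  # the planner would receive the project's real tools here

prompt = hub.pull("hwchase17/react")  # stock ReAct prompt with {tools} and {agent_scratchpad}
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

print(executor.invoke({"input": "Outline a plan for summarizing a CSV file."}))
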
23 changes: 18 additions & 5 deletions src/codeinterpreterapi/supervisors/supervisors.py
@@ -1,7 +1,10 @@
-from langchain import hub
+import getpass
+import os
+import platform

from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
-from langchain_core.runnables import RunnablePassthrough
+from langchain_core.runnables import RunnableAssign, RunnablePassthrough
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner

@@ -13,11 +16,21 @@ class MySupervisorChain(Chain):
class CodeInterpreterSupervisor:
    @staticmethod
    def choose_supervisor(planner: LLMPlanner, executor: AgentExecutor, verbose: bool = False) -> MySupervisorChain:
        # prompt
+        username = getpass.getuser()
+        current_working_directory = os.getcwd()
+        operating_system = platform.system()
+        info = f"[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
+        print("choose_supervisor info=", info)

        supervisor = PlanAndExecute(planner=planner, executor=executor, verbose=verbose)
-        prompt = hub.pull("nobu/chat_planner")
-        agent = create_react_agent(llm, [], prompt)
-        return agent
+        # prompt = hub.pull("nobu/chat_planner")
+        # agent = create_react_agent(llm, [], prompt)
+        # return agent
+        # prompt = hub.pull("nobu/code_writer:0c56967d")

        supervisor_chain = RunnablePassthrough() | supervisor
        return supervisor_chain

+        supervisor_chain = RunnableAssign() | supervisor
+        return supervisor_chain
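
The supervisor is now built by piping RunnablePassthrough into the PlanAndExecute chain. A tiny LCEL sketch of what that pipe produces, with a toy runnable standing in for PlanAndExecute:

from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# Stand-in for the PlanAndExecute chain; any Runnable works on the right-hand side.
fake_supervisor = RunnableLambda(lambda inputs: {"output": inputs["input"].upper()})

# RunnablePassthrough() forwards its input unchanged, so this sequence behaves like
# calling fake_supervisor directly while still yielding a composable chain object.
supervisor_chain = RunnablePassthrough() | fake_supervisor

print(supervisor_chain.invoke({"input": "plan the next step"}))  # {'output': 'PLAN THE NEXT STEP'}

When extra keys (such as the user info gathered above) need to be merged into the input, RunnablePassthrough.assign(...) is the usual variant of this pattern.
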
6 changes: 1 addition & 5 deletions src/codeinterpreterapi/thoughts/thoughts.py
@@ -1,13 +1,9 @@
from gui_agent_loop_core.thoughts.prompts import get_propose_prompt, get_propose_prompt_ja
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain.prompts.base import BasePromptTemplate
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
from langchain_experimental.pydantic_v1 import Field

# from langchain_experimental.tot.prompts import get_cot_prompt, get_propose_prompt
from langchain_experimental.tot.thought_generation import BaseThoughtGenerationStrategy, ProposePromptStrategy
from langchain_experimental.tot.thought_generation import ProposePromptStrategy


class MyProposePromptStrategy(ProposePromptStrategy):
4 changes: 1 addition & 3 deletions src/codeinterpreterapi/tools/tools.py
@@ -1,8 +1,7 @@
from langchain_community.tools.shell.tool import BaseTool, ShellTool
from langchain_community.tools.tavily_search import TavilySearchResults
-from langchain_core.language_models import LLM, BaseLanguageModel, BaseLLM
+from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import StructuredTool, Tool
-from langchain_experimental.chat_models.llm_wrapper import ChatWrapper
from langchain_experimental.llm_bash.base import LLMBashChain

from codeinterpreterapi.config import settings
@@ -65,7 +64,6 @@ def get_shell_v2(additional_tools: list[BaseTool], llm: BaseLanguageModel) -> li
    commands
    field required (type=value_error.missing)
    """
-    llm_runnable = ChatWrapper(llm=llm)
    bash_chain = LLMBashChain.from_llm(llm=llm)
    bash_tool = Tool(
        name="Bash", func=bash_chain.invoke, description="Executes bash commands in a terminal environment."
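
get_shell_v2 now hands bash_chain.invoke straight to a Tool. A self-contained sketch of that wrapping pattern, with a plain subprocess callable standing in for LLMBashChain (names here are illustrative):

import subprocess

from langchain_core.tools import Tool

def run_bash(command: str) -> str:
    # Stand-in for bash_chain.invoke: run the command and return its output.
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    return result.stdout + result.stderr

bash_tool = Tool(
    name="Bash",
    func=run_bash,
    description="Executes bash commands in a terminal environment.",
)

print(bash_tool.run("echo hello"))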
