Commit

fix: add crew_agent.py
nobu007 committed Jul 18, 2024
1 parent 26af5e0 commit 59ce319
Showing 2 changed files with 122 additions and 0 deletions.
60 changes: 60 additions & 0 deletions src/codeinterpreterapi/crewai/crew_agent.py
@@ -0,0 +1,60 @@
from typing import Dict

from crewai import Agent, Crew, Task

from codeinterpreterapi.agents.agents import CodeInterpreterAgent
from codeinterpreterapi.brain.params import CodeInterpreterParams
from codeinterpreterapi.crewai.custom_agent import (
    CustomAgent,  # You need to build and extend your own agent logic with the CrewAI BaseAgent class then import it here.
)
from codeinterpreterapi.graphs.agent_wrapper_tool import AgentWrapperTool
from codeinterpreterapi.llm.llm import prepare_test_llm
from codeinterpreterapi.planners.planners import CodeInterpreterPlanner
from codeinterpreterapi.supervisors.supervisors import CodeInterpreterSupervisor
from codeinterpreterapi.test_prompts.test_prompt import TestPrompt


def run(ci_params: CodeInterpreterParams, inputs: Dict):
    agents = []
    tasks = []
    tools = AgentWrapperTool.create_agent_wrapper_tools(ci_params)
    for agent_def in ci_params.agent_def_list:
        agent_executor = agent_def.agent_executor
        role = agent_def.agent_name
        goal = "provide clear information for the user about " + agent_def.agent_name
        backstory = agent_def.agent_role
        # Custom agent that wraps the existing LangChain AgentExecutor.
        agent_custom = CustomAgent(agent_executor=agent_executor, role=role, goal=goal, backstory=backstory)
        # Plain CrewAI agent assigned to the task below; only the first wrapper tool is attached.
        agent = Agent(
            role=role,
            goal=goal,
            backstory=backstory,
            tools=[tools[0]],
            llm=ci_params.llm_fast,
        )
        agents.append(agent_custom)

        task = Task(
            expected_output=agent_def.agent_role + " of {input}",
            description=agent_def.agent_role,
            agent=agent,
        )
        tasks.append(task)

    my_crew = Crew(agents=agents, tasks=tasks)
    result = my_crew.kickoff(inputs=inputs)
    return result


def test():
    llm, llm_tools, runnable_config = prepare_test_llm()
    ci_params = CodeInterpreterParams.get_test_params(llm=llm, llm_tools=llm_tools, runnable_config=runnable_config)
    _ = CodeInterpreterAgent.choose_agent_executors(ci_params=ci_params)
    planner = CodeInterpreterPlanner.choose_planner(ci_params=ci_params)
    _ = CodeInterpreterSupervisor.choose_supervisor(planner=planner, ci_params=ci_params)
    inputs = {"input": TestPrompt.svg_input_str}
    result = run(ci_params, inputs)
    print(result)


if __name__ == "__main__":
    test()
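
For reference, the wiring above follows the standard CrewAI Agent/Task/Crew pattern. Below is a minimal, self-contained sketch of that pattern; the role, goal, and description strings are placeholders and the default LLM configuration is assumed, so nothing here is taken from this repository:

from crewai import Agent, Crew, Task

# Hypothetical stand-ins; run() above derives these values from ci_params.agent_def_list.
researcher = Agent(
    role="researcher",
    goal="provide clear information for the user",
    backstory="an agent that researches the user's request",
)
task = Task(
    description="research",
    expected_output="a short summary of {input}",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])
# kickoff() interpolates the {input} placeholder from the inputs dict,
# just as my_crew.kickoff(inputs=inputs) does in run() above.
result = crew.kickoff(inputs={"input": "draw a red SVG circle"})
print(result)
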
62 changes: 62 additions & 0 deletions src/codeinterpreterapi/crewai/custom_agent.py
@@ -0,0 +1,62 @@
from typing import Any, Dict, List, Optional

from crewai.agents.agent_builder.base_agent import BaseAgent
from langchain_core.tools import BaseTool
from pydantic import Field


class CustomAgent(BaseAgent):
    agent_executor: Any = Field(default=None, description="LangChain AgentExecutor that runs the agent.")
    function_calling_llm: Optional[Any] = Field(description="Language model that will run the agent.", default=None)
    allow_code_execution: Optional[bool] = Field(default=False, description="Enable code execution for the agent.")
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )

    def __init__(self, agent_executor: Any, **data):
        config = data.pop("config", {})

        super().__init__(**config, **data)
        self.agent_executor = agent_executor
        self.function_calling_llm = "dummy"  # This is not used
        self.allow_code_execution = False
        self.step_callback = None

    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the task description and expected output."""
        print("interpolate_inputs inputs=", inputs)
        super().interpolate_inputs(inputs)

    def execute_task(self, task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None):
        # Execute the task through the wrapped AgentExecutor.
        print("execute_task task=", task)
        print("execute_task context=", context)
        input_dict = {}
        input_dict["input"] = context
        input_dict["question"] = context
        input_dict["message"] = task.description + "を実行してください。\n" + task.expected_output
        result = self.agent_executor.invoke(input=input_dict)
        return result

    def create_agent_executor(self, tools=None) -> None:
        # The executor is injected via __init__, so there is nothing to build here.
        pass

    def _parse_tools(self, tools: List[Any]) -> List[Any]:
        return []

    def parse_tools(self, tools: Optional[List[BaseTool]]) -> List[BaseTool]:
        # Pass the tools through unchanged.
        return tools or []

    def get_delegation_tools(self, agents: List[BaseAgent]):
        return []

    def get_output_converter(self, llm, text, model, instructions):
        return lambda x: x  # No conversion by default

    def execute(self, task_description: str, context: Optional[List[str]] = None):
        # Build a single prompt from the context and the task description, then run the executor.
        full_context = "\n".join(context) if context else ""
        full_input = f"{full_context}\n\nTask: {task_description}"
        return self.agent_executor.run(input=full_input)

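As a usage note, execute_task() only relies on the wrapped object exposing invoke(), so the class can be exercised outside a Crew. Below is a minimal sketch under that assumption, using a RunnableLambda as a stand-in for the real LangChain AgentExecutor; the role, goal, backstory, and task strings are illustrative only:

from crewai import Task
from langchain_core.runnables import RunnableLambda

from codeinterpreterapi.crewai.custom_agent import CustomAgent

# Stand-in executor: anything with .invoke() works for execute_task().
echo_executor = RunnableLambda(lambda x: {"output": "handled: " + x["message"]})

agent = CustomAgent(
    agent_executor=echo_executor,
    role="coder",
    goal="write the requested code",
    backstory="a coding agent",
)
task = Task(description="write main()", expected_output="python code")
print(agent.execute_task(task, context="a CLI that prints hello"))
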