Merge pull request #26 from panregedit/feature/v2.0.0/framework_refactor
refactor the framework
panregedit authored Oct 20, 2024
2 parents ef4cc43 + 9a819f0 commit c40accb
Showing 137 changed files with 365 additions and 787 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -150,4 +150,4 @@ tests
data/
tests/mathvista
running_logs/
-.db
+*.db
4 changes: 2 additions & 2 deletions README.md
@@ -74,7 +74,7 @@ For more details, check out our paper **[OmAgent: A Multi-modal Agent Framework
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/general") # General task processing workflow configuration directory
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
@@ -145,7 +145,7 @@ For more details, check out our paper **[OmAgent: A Multi-modal Agent Framework
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/video_understanding") # Video understanding task workflow configuration directory
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
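For reference, the updated README snippet reads as follows once `DnCInterface` is replaced by `BaseWorkflowContext`. The import lines are not part of the README hunk above; they are assumptions pieced together from the new module layout shown elsewhere in this diff, and the `logging` and `Builder` locations in particular are hypothetical.

```python
from pathlib import Path

# NOTE: import locations below are assumptions inferred from this PR's new
# package layout; the README hunk only shows the call sites.
from omagent_core.utils.registry import registry
from omagent_core.utils.logger import logging                 # hypothetical path
from omagent_core.engine.worker.builder import Builder        # hypothetical path
from omagent_core.engine.workflow.context import BaseWorkflowContext
from omagent_core.engine.task.agent_task import AgentTask


def run_general_workflow(task: str) -> str:
    """Build the general workflow and run it on a single task string."""
    logging.init_logger("omagent", "omagent", level="INFO")
    registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])

    # General task processing workflow configuration directory
    bot_builder = Builder.from_file("workflows/general")

    # DnCInterface(bot_id=..., task=...) becomes BaseWorkflowContext(bot_id=..., task=...)
    input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))
    bot_builder.run_bot(input)
    return input.last_output
```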
2 changes: 1 addition & 1 deletion README_FR.md
@@ -85,7 +85,7 @@ Pour plus d'informations, consultez notre article : **[OmAgent : Un cadre d'agen
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/general")
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
4 changes: 2 additions & 2 deletions README_JP.md
@@ -75,7 +75,7 @@ OmAgentは3つの主要なコンポーネントで構成されています:
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/general") # 一般タスク処理ワークフロー設定ディレクトリ
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
@@ -146,7 +146,7 @@ OmAgentは3つの主要なコンポーネントで構成されています:
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/video_understanding") # Video understanding task workflow configuration directory
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
4 changes: 2 additions & 2 deletions README_ZH.md
@@ -74,7 +74,7 @@ OmAgent包括三个核心组成部分:
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/general") # 通用任务处理workflow配置目录
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
@@ -146,7 +146,7 @@ OmAgent包括三个核心组成部分:
logging.init_logger("omagent", "omagent", level="INFO")
registry.import_module(project_root=Path(__file__).parent, custom=["./engine"])
bot_builder = Builder.from_file("workflows/video_understanding") # 视频理解任务workflow配置目录
-input = DnCInterface(bot_id="1", task=AgentTask(id=0, task=task))
+input = BaseWorkflowContext(bot_id="1", task=AgentTask(id=0, task=task))

bot_builder.run_bot(input)
return input.last_output
12 changes: 0 additions & 12 deletions engine/loop/inf_loop.py

This file was deleted.

3 changes: 0 additions & 3 deletions engine/video_process/__init__.py

This file was deleted.

10 files renamed without changes.
@@ -4,13 +4,13 @@
from typing import List, Tuple

from colorama import Fore, Style
-from omagent_core.core.node.dnc import TaskConqueror
-from omagent_core.core.node.dnc.schemas import AgentTask, TaskStatus
-from omagent_core.core.prompt.prompt import PromptTemplate
-from omagent_core.core.tool_system.manager import ToolManager
-from omagent_core.handlers.data_handler.ltm import LTM
-from omagent_core.schemas.base import BaseInterface
+from omagent_core.advanced_components.node.conqueror.conqueror import TaskConqueror
+from omagent_core.engine.workflow.context import BaseWorkflowContext
+from omagent_core.models.llms.prompt import PromptTemplate
+from omagent_core.tool_system.manager import ToolManager
+from omagent_core.memories.ltms.ltm import LTM
from omagent_core.utils.env import EnvVar
+from omagent_core.engine.task.agent_task import AgentTask, TaskStatus
from omagent_core.utils.registry import registry
from pydantic import Field
from tenacity import (
@@ -45,7 +45,7 @@ class VideoConqueror(TaskConqueror):
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-def _run(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
task.status = TaskStatus.RUNNING

@@ -197,7 +197,7 @@ def _run(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-async def _arun(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
+async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
task.status = TaskStatus.RUNNING

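The import rewrites in this file follow the same old-to-new module mapping used throughout the PR. A rough summary, reconstructed only from the hunks visible here (an approximation, not an exhaustive or authoritative list):

```python
# Approximate module relocations inferred from the import rewrites in this PR.
# Anything not shown in the hunks above should be treated as a guess.
MODULE_MOVES = {
    "omagent_core.core.llm.base":             "omagent_core.models.llms.base",
    "omagent_core.core.prompt.prompt":        "omagent_core.models.llms.prompt",
    "omagent_core.core.node.base":            "omagent_core.engine.node",
    "omagent_core.core.node.dnc.interface":   "omagent_core.engine.workflow.context",  # DnCInterface -> BaseWorkflowContext
    "omagent_core.core.node.dnc.schemas":     "omagent_core.engine.task.agent_task",   # AgentTask, TaskStatus
    "omagent_core.core.tool_system.manager":  "omagent_core.tool_system.manager",
    "omagent_core.handlers.data_handler.ltm": "omagent_core.memories.ltms.ltm",
    "omagent_core.schemas.base":              "omagent_core.engine.workflow.context",  # BaseInterface -> BaseWorkflowContext
}
```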
File renamed without changes.
@@ -3,13 +3,13 @@
from pathlib import Path
from typing import List, Tuple

-from omagent_core.core.llm.base import BaseLLMBackend
-from omagent_core.core.node.base import BaseDecider
-from omagent_core.core.node.dnc.interface import DnCInterface
-from omagent_core.core.node.dnc.schemas import AgentTask
-from omagent_core.core.prompt.prompt import PromptTemplate
-from omagent_core.core.tool_system.manager import ToolManager
-from omagent_core.handlers.data_handler.ltm import LTM
+from omagent_core.models.llms.base import BaseLLMBackend
+from omagent_core.engine.node import BaseDecider
+from omagent_core.engine.workflow.context import BaseWorkflowContext
+from omagent_core.engine.task.agent_task import AgentTask
+from omagent_core.models.llms.prompt import PromptTemplate
+from omagent_core.tool_system.manager import ToolManager
+from omagent_core.memories.ltms.ltm import LTM
from omagent_core.utils.env import EnvVar
from omagent_core.utils.registry import registry
from pydantic import Field
@@ -45,7 +45,7 @@ class VideoDivider(BaseLLMBackend, BaseDecider):
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-def _run(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
if task.task_depth() >= EnvVar.MAX_TASK_DEPTH:
args.last_output = "failed: Max subtask depth reached"
@@ -97,7 +97,7 @@ def _run(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-async def _arun(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
+async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
if task.task_depth() >= EnvVar.MAX_TASK_DEPTH:
args.last_output = "failed: Max subtask depth reached"
@@ -3,11 +3,11 @@
from pathlib import Path
from typing import List

-from omagent_core.core.llm.base import BaseLLMBackend
-from omagent_core.core.node.base import BaseProcessor
-from omagent_core.core.node.dnc.interface import DnCInterface
-from omagent_core.core.prompt.prompt import PromptTemplate
-from omagent_core.handlers.data_handler.ltm import LTM
+from omagent_core.models.llms.base import BaseLLMBackend
+from omagent_core.engine.node import BaseProcessor
+from omagent_core.engine.workflow.context import BaseWorkflowContext
+from omagent_core.models.llms.prompt import PromptTemplate
+from omagent_core.memories.ltms.ltm import LTM
from omagent_core.utils.registry import registry
from pydantic import Field

@@ -27,7 +27,7 @@ class VideoQA(BaseProcessor, BaseLLMBackend):
]
)

-def _run(self, args: DnCInterface, ltm: LTM) -> DnCInterface:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> BaseWorkflowContext:
video_md5 = args.kwargs.get("video_md5", None)
self.stm.image_cache.clear()
self.stm.former_results = {}
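For contrast with the decider nodes elsewhere in this PR, a minimal processor under the new signatures might look like the sketch below. The class name and its body are illustrative only; the `BaseProcessor`, `BaseWorkflowContext`, `LTM`, and `registry` import paths mirror those shown in the hunk above.

```python
from omagent_core.engine.node import BaseProcessor
from omagent_core.engine.workflow.context import BaseWorkflowContext
from omagent_core.memories.ltms.ltm import LTM
from omagent_core.utils.registry import registry


@registry.register_node()
class EchoTask(BaseProcessor):
    """Illustrative node: copies the current task description into last_output."""

    def _run(self, args: BaseWorkflowContext, ltm: LTM) -> BaseWorkflowContext:
        # AgentTask carries the task description in its `task` field.
        args.last_output = args.task.task
        return args

    async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> BaseWorkflowContext:
        return self._run(args, ltm)
```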
5 files renamed without changes.
4 changes: 2 additions & 2 deletions omagent-core/pyproject.toml
@@ -8,7 +8,7 @@ version = "0.0.1"
authors = [
{ name = "panregedit", email = "[email protected]" },
]
-description = "Core package for OMbots"
+description = "Core package for OmAgent"
readme = "README.md"
requires-python = ">=3.10, <3.12"
classifiers = [
@@ -40,4 +40,4 @@ dependencies = [
]

[project.urls]
-"Homepage" = "https://git.linker.cc/research/misc/ombot_utils.git"
+"Homepage" = "https://github.com/om-ai-lab/OmAgent"
2 files renamed without changes.
@@ -0,0 +1,12 @@
from ....engine.node import BaseLoop, Node
from ....memories.ltms.ltm import LTM
from ....engine.workflow.context import BaseWorkflowContext
from ....utils.registry import registry


@registry.register_node()
class InfLoop(BaseLoop):
loop_body: Node

def post_loop_exit(self, args: BaseWorkflowContext, ltm: LTM) -> bool:
return False
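InfLoop above never exits because `post_loop_exit` always returns `False`. As a sketch of how the same hook can terminate a loop under the new interfaces (the class and its stopping condition are illustrative, not part of this PR):

```python
from omagent_core.engine.node import BaseLoop, Node
from omagent_core.engine.workflow.context import BaseWorkflowContext
from omagent_core.memories.ltms.ltm import LTM
from omagent_core.utils.registry import registry


@registry.register_node()
class StopOnOutputLoop(BaseLoop):
    """Illustrative loop node that exits once the workflow has produced an output."""

    loop_body: Node

    def post_loop_exit(self, args: BaseWorkflowContext, ltm: LTM) -> bool:
        # Returning True ends the loop; InfLoop always returns False and never exits.
        return bool(args.last_output)
```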
2 files renamed without changes.
@@ -1,12 +1,12 @@
from pathlib import Path
from typing import List

-from omagent_core.core.llm.base import BaseLLMBackend
-from omagent_core.core.node.base import BaseProcessor
-from omagent_core.core.node.dnc.interface import DnCInterface
-from omagent_core.core.prompt.prompt import PromptTemplate
-from omagent_core.handlers.data_handler.ltm import LTM
-from omagent_core.utils.registry import registry
+from ....models.llms.base import BaseLLMBackend
+from ....engine.node import BaseProcessor
+from ....engine.workflow.context import BaseWorkflowContext
+from ....models.llms.prompt import PromptTemplate
+from ....memories.ltms.ltm import LTM
+from ....utils.registry import registry
from pydantic import Field

CURRENT_PATH = root_path = Path(__file__).parents[0]
@@ -25,7 +25,7 @@ class Conclude(BaseLLMBackend, BaseProcessor):
]
)

-def _run(self, args: DnCInterface, ltm: LTM) -> DnCInterface:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> BaseWorkflowContext:
chat_complete_res = self.simple_infer(
task=args.task.find_root_task().task,
result=args.last_output,
@@ -40,7 +40,7 @@ def _run(self, args: DnCInterface, ltm: LTM) -> DnCInterface:
print(f"Usage of {key}: {value}")
return args

-async def _arun(self, args: DnCInterface, ltm: LTM) -> DnCInterface:
+async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> BaseWorkflowContext:
chat_complete_res = await self.simple_ainfer(
task=args.task.find_root_task().task,
result=args.last_output,
3 files renamed without changes.
@@ -12,15 +12,15 @@
stop_after_delay,
)

-from .....handlers.data_handler.ltm import LTM
-from .....schemas.base import BaseInterface
-from .....utils.env import EnvVar
-from .....utils.registry import registry
-from ....llm.base import BaseLLMBackend
-from ....prompt.prompt import PromptTemplate
+from ....memories.ltms.ltm import LTM
+from ....engine.workflow.context import BaseWorkflowContext
+from ....utils.env import EnvVar
+from ....utils.registry import registry
+from ....models.llms.base import BaseLLMBackend
+from ....models.llms.prompt.prompt import PromptTemplate
from ....tool_system.manager import ToolManager
-from ...base import BaseDecider
-from ..schemas import AgentTask, TaskStatus
+from ....engine.node.decider import BaseDecider
+from ....engine.task.agent_task import AgentTask, TaskStatus

CURRENT_PATH = Path(__file__).parents[0]

@@ -47,7 +47,7 @@ class TaskConqueror(BaseLLMBackend, BaseDecider):
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-def _run(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
task.status = TaskStatus.RUNNING

@@ -190,7 +190,7 @@ def _run(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-async def _arun(self, args: BaseInterface, ltm: LTM) -> Tuple[BaseInterface, str]:
+async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
task.status = TaskStatus.RUNNING

File renamed without changes.
@@ -11,15 +11,15 @@
stop_after_delay,
)

-from .....handlers.data_handler.ltm import LTM
-from .....utils.env import EnvVar
-from .....utils.registry import registry
-from ....llm.base import BaseLLMBackend
-from ....prompt.prompt import PromptTemplate
+from ....memories.ltms.ltm import LTM
+from ....utils.env import EnvVar
+from ....utils.registry import registry
+from ....models.llms.base import BaseLLMBackend
+from ....models.llms.prompt.prompt import PromptTemplate
from ....tool_system.manager import ToolManager
-from ...base import BaseDecider
-from ..interface import DnCInterface
-from ..schemas import AgentTask
+from ....engine.node.decider import BaseDecider
+from ....engine.workflow.context import BaseWorkflowContext
+from ....engine.task.agent_task import AgentTask

CURRENT_PATH = Path(__file__).parents[0]

@@ -46,7 +46,7 @@ class TaskDivider(BaseLLMBackend, BaseDecider):
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-def _run(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
+def _run(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
if task.task_depth() >= EnvVar.MAX_TASK_DEPTH:
args.last_output = "failed: Max subtask depth reached"
@@ -97,7 +97,7 @@ def _run(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
retry=retry_if_exception_message(message="LLM generation is not valid."),
reraise=True,
)
-async def _arun(self, args: DnCInterface, ltm: LTM) -> Tuple[DnCInterface, str]:
+async def _arun(self, args: BaseWorkflowContext, ltm: LTM) -> Tuple[BaseWorkflowContext, str]:
task: AgentTask = args.task
if task.task_depth() >= EnvVar.MAX_TASK_DEPTH:
args.last_output = "failed: Max subtask depth reached"
@@ -1,14 +1,14 @@
-from omagent_core.core.node.base import BaseLoop, Node
-from omagent_core.core.node.dnc import DnCInterface
-from omagent_core.handlers.data_handler.ltm import LTM
-from omagent_core.utils.registry import registry
+from ....engine.node import BaseLoop, Node
+from ....memories.ltms.ltm import LTM
+from ....engine.workflow.context import BaseWorkflowContext
+from ....utils.registry import registry


@registry.register_node()
class DnCLoop(BaseLoop):
loop_body: Node

-def post_loop_exit(self, args: DnCInterface, ltm: LTM) -> bool:
+def post_loop_exit(self, args: BaseWorkflowContext, ltm: LTM) -> bool:
# self.stm.image_cache.clear()
if args.task.status == "failed":
return True
File renamed without changes.