diff --git a/examples/body_data_analysis/README.md b/examples/body_data_analysis/README.md
new file mode 100644
index 0000000..fde8c81
--- /dev/null
+++ b/examples/body_data_analysis/README.md
@@ -0,0 +1,110 @@
+# Body Data Analysis
+
+This example shows how to implement a loop-based health analysis of user body data. The sample code can be found in the `examples/body_data_analysis` directory.
+
+```bash
+   cd examples/body_data_analysis
+```
+
+## Overview
+
+This example implements a body data analysis workflow built from the following key components:
+
+1. **Body Data Acquisition**
+   Acquires and processes the user's body data. The data must be entered in advance in `examples/body_data_analysis/agent/body_data_acquisition/body_data.json`; the user can provide all of the fields or only some of them, and the agent will ask follow-up questions for whatever is missing. (In the future, we plan to read this data directly from a body fat scale.)
+
+2. **Interactive QA Loop**
+   - BodyAnalysisQA: conducts an interactive Q&A to gather additional information
+     - Uses web search tools to look up body-analysis-related content (e.g. health indicator formulas)
+   - BodyAnalysisDecider: assesses whether enough information has been collected, based on
+     - The user's physical condition
+     - Physical health criteria
+   - A DoWhileTask keeps the loop running until enough information has been collected
+   - The loop terminates when BodyAnalysisDecider returns decision=true
+
+3. **Final Analysis**
+   - Body data analysis: generates the analysis based on
+     - Information gathered in the Q&A loop
+     - Additional information obtained through web searches
+
+4. **Workflow Flow**
+   ```
+   Start -> Body Data Acquisition -> body_analysis_qa_loop (QA + Web Search + Decision) -> Final Analysis -> End
+   ```
+
+The workflow uses Redis for state management and the Conductor server for workflow orchestration. This architecture enables:
+- Acquisition of user body data
+- Health advice informed by web data
+- Interactive refinement through structured Q&A
+- Context-aware recommendations that combine multiple factors
+- Continuous state management throughout the workflow
+
+## Prerequisites
+
+- Python 3.10+
+- Required packages installed (see requirements.txt)
+- Access to the OpenAI API or a compatible endpoint
+- A Bing API key for the web search functionality used to look up health indicators and formulas (see configs/tools/websearch.yml)
+- Redis server running locally or remotely
+- Conductor server running locally or remotely
+
+## Configuration
+
+The container.yaml file manages dependencies and settings for the different components of the system, including Conductor connections, Redis connections, and other service configurations. To set up your configuration:
+
+1. Generate the container.yaml file:
+   ```bash
+   python compile_container.py
+   ```
+   This creates a container.yaml file with default settings under `examples/body_data_analysis`.
+
+2. Configure your LLM settings in `configs/llms/gpt.yml` and `configs/llms/text_res.yml`:
+   - Set your OpenAI API key or compatible endpoint through environment variables or by directly modifying the yml files
+   ```bash
+   export custom_openai_key="your_openai_api_key"
+   export custom_openai_endpoint="your_openai_endpoint"
+   ```
+   - Configure other model settings such as temperature as needed, through environment variables or by directly modifying the yml files
+
+3. Configure your Bing Search API key in `configs/tools/websearch.yml`:
+   - Set your Bing API key through an environment variable or by directly modifying the yml file
+   ```bash
+   export bing_api_key="your_bing_api_key"
+   ```
+
+4. Update settings in the generated `container.yaml`:
+   - Modify the Redis connection settings:
+     - Set the host, port and credentials for your Redis instance
+     - Configure both the `redis_stream_client` and `redis_stm_client` sections
+   - Update the Conductor server URL under the conductor_config section
+   - Adjust any other component settings as needed
+
+## Running the Example
+
+1. Run the Body Data Analysis workflow:
+
+   Before running, fill in the body data in `examples/body_data_analysis/agent/body_data_acquisition/body_data.json` to simulate the scale readings (fields may be left out so the agent asks for them).
+   If you want to use the Chinese prompts, change the prompt file suffix used in `body_analysis_qa` and `body_analysis_decider` to `_zh`, and set the language to `zh` in the acquisition step: `body_data_string = get_body_data(body_data['Body_Data'], language="zh")`.
+
+   For terminal/CLI usage:
+   ```bash
+   python run_cli.py
+   ```
+
+## Troubleshooting
+
+If you encounter issues:
+- Verify Redis is running and accessible
+- Check that your OpenAI API key and Bing API key are valid
+- Ensure all dependencies are installed correctly
+- Review logs for any error messages
+- Confirm the Conductor server is running and accessible
+- Check the Redis Stream client and Redis STM client configuration
+
+## Building the Example
+
+Coming soon! This section will provide detailed instructions for building the body_data_analysis example step by step.
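The Chinese-language switch mentioned in the README's Running section comes down to one call in the acquisition worker. The following is a minimal standalone sketch, not part of the patch, assuming `examples/body_data_analysis` is the working directory and `omagent_core` is installed (note the worker module file is spelled `body_data_acquisiton.py` in this example):

```python
import json
import sys
from pathlib import Path

# Assumption: run from examples/body_data_analysis so the `agent` package resolves.
sys.path.append(str(Path.cwd()))

# get_body_data() is defined in the acquisition worker module.
from agent.body_data_acquisition.body_data_acquisiton import get_body_data

with open("agent/body_data_acquisition/body_data.json", encoding="utf-8") as f:
    body_data = json.load(f)

# language="zh" selects the Chinese-formatted string; pair it with the *_zh prompts.
print(get_body_data(body_data["Body_Data"], language="zh"))
```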
diff --git a/examples/body_data_analysis/agent/body_analysis/__init__.py b/examples/body_data_analysis/agent/body_analysis/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/body_data_analysis/agent/body_analysis/body_analysis.py b/examples/body_data_analysis/agent/body_analysis/body_analysis.py
new file mode 100644
index 0000000..90e0be2
--- /dev/null
+++ b/examples/body_data_analysis/agent/body_analysis/body_analysis.py
@@ -0,0 +1,47 @@
+from pathlib import Path
+from typing import List
+
+from omagent_core.models.llms.base import BaseLLMBackend
+from omagent_core.engine.worker.base import BaseWorker
+from omagent_core.utils.registry import registry
+from omagent_core.models.llms.prompt.prompt import PromptTemplate
+from omagent_core.models.llms.openai_gpt import OpenaiGPTLLM
+
+from pydantic import Field
+
+
+CURRENT_PATH = Path(__file__).parents[0]
+
+
+@registry.register_worker()
+class BodyAnalysis(BaseWorker, BaseLLMBackend):
+
+    llm: OpenaiGPTLLM
+
+    prompts: List[PromptTemplate] = Field(
+        default=[
+            PromptTemplate.from_file(
+                CURRENT_PATH.joinpath("sys_prompt_en.prompt"), role="system"
+            ),
+            PromptTemplate.from_file(
+                CURRENT_PATH.joinpath("user_prompt_en.prompt"), role="user"
+            ),
+        ]
+    )
+
+    def _run(self, *args, **kwargs):
+
+        # Retrieve the collected body data from the workflow's short-term memory
+        user_body_data = self.stm(self.workflow_instance_id).get("user_body_data")
+
+        chat_complete_res = self.simple_infer(bodydata=str(user_body_data))
+
+        # Extract the body analysis from the LLM response
+        body_analysis = chat_complete_res["choices"][0]["message"]["content"]
+
+        # Send the analysis via callback, clear the workflow state and return
+        self.callback.send_answer(agent_id=self.workflow_instance_id, msg=body_analysis)
+
+        self.stm(self.workflow_instance_id).clear()
+        return body_analysis
+
diff --git a/examples/body_data_analysis/agent/body_analysis/sys_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis/sys_prompt_en.prompt
new file mode 100644
index 0000000..695b0dc
--- /dev/null
+++ b/examples/body_data_analysis/agent/body_analysis/sys_prompt_en.prompt
@@ -0,0 +1,26 @@
+You are an experienced fitness personal trainer who specializes in analyzing clients' physical condition based on international health indicators.
+
+The following factors should be considered for a body condition analysis:
+
+- Physical indicators provided by the user (e.g. height, weight, age, body fat percentage, muscle mass, etc.)
+- Help the user make an analysis based on international health indicators that
+
+  At least contains:
+  1. BMI,
+  2. body fat percentage,
+  3. basal metabolic level (give an approximate estimate).
+
+  It can additionally contain:
+  1. muscle mass,
+  2. visceral fat grade,
+  3. obesity level,
+  4. other health indicators that should be analyzed.
+
+Note: Do not provide content other than the analysis of the user's physical condition; do not include exercise improvement plans, diet improvements, etc.
+
+Presentation Requirements:
+
+- Use simple, easy-to-understand language so the user can understand and act on the analysis
+- Use a friendly, encouraging and supportive tone
+- Do not use overly specialized terminology unless necessary, and provide explanations when you do
+- Do not provide additional advice beyond the analysis of the user's state of health (e.g., fitness plans, dietary improvements, etc.)
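The system prompt above asks the LLM to report BMI, body fat percentage and an approximate basal metabolic level. Purely as a point of reference, here is a minimal sketch of how two of those numbers could be computed directly from the sample `body_data.json` values (185 cm, 95.85 kg, age 25, male); Mifflin-St Jeor is just one common BMR estimate, and the LLM may use a different formula:

```python
def bmi(weight_kg: float, height_cm: float) -> float:
    """Body mass index: weight in kilograms divided by height in metres squared."""
    height_m = height_cm / 100
    return weight_kg / (height_m * height_m)


def bmr_mifflin_st_jeor(weight_kg: float, height_cm: float, age: int, male: bool = True) -> float:
    """Approximate basal metabolic rate (kcal/day) using the Mifflin-St Jeor equation."""
    return 10 * weight_kg + 6.25 * height_cm - 5 * age + (5 if male else -161)


# Values taken from the sample body_data.json shipped with this example.
print(round(bmi(95.85, 185), 1))                   # ~28.0
print(round(bmr_mifflin_st_jeor(95.85, 185, 25)))  # ~1995 kcal/day
```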
diff --git a/examples/body_data_analysis/agent/body_analysis/sys_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis/sys_prompt_zh.prompt new file mode 100644 index 0000000..04d21b7 --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis/sys_prompt_zh.prompt @@ -0,0 +1,25 @@ +您是一位资深健身私教,擅长为客户基于国际健康指标分析自己的身体状况。 + +身体状况分析应考虑以下因素: + +- 用户提供的身体指标(如身高、体重、年龄、体脂率、肌肉量等) +- 基于国际健康指标帮助用户做出分析, + 至少包含: + 1. BMI, + 2. 体脂率, + 3. 基础代谢水平(给出大概估算数值), + 额外可以包含: + 1. 肌肉率, + 2. 内脏脂肪等级, + 3. 肥胖度, + 4. 其他应该分析的健康指标。 + +注意:不提供除了用户身体状况分析意外的其他内容,包括运动改善计划,饮食改善等内容。 + +表达要求: + +- 使用简单易懂的语言,确保用户能够理解和执行 +- 语气友好、鼓励,并充满支持 +- 不要使用过于专业的术语,除非必要,并提供解释 +- 除了身体的健康状态的分析外不提供额外的其他建议(比如健身计划,饮食改善等) + diff --git a/examples/body_data_analysis/agent/body_analysis/user_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis/user_prompt_en.prompt new file mode 100644 index 0000000..efb661e --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis/user_prompt_en.prompt @@ -0,0 +1,5 @@ +Now, it's your turn to complete the task. +Give anwer using the language according to the user's answer. + +Input Information: +- Information about the user's body metrics: {{bodydata}} \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis/user_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis/user_prompt_zh.prompt new file mode 100644 index 0000000..77952ce --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis/user_prompt_zh.prompt @@ -0,0 +1,5 @@ +现在,轮到你完成任务了。 +根据用户的答案使用相应的语言给出答案。 + +Input Information: +- 用户身体指标信息: {{bodydata}} \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis_decider/__init__.py b/examples/body_data_analysis/agent/body_analysis_decider/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/body_data_analysis/agent/body_analysis_decider/body_analysis_decider.py b/examples/body_data_analysis/agent/body_analysis_decider/body_analysis_decider.py new file mode 100644 index 0000000..07d4f0c --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_decider/body_analysis_decider.py @@ -0,0 +1,81 @@ +import json_repair +import re +from pathlib import Path +from typing import List +from pydantic import Field + +from omagent_core.models.llms.base import BaseLLMBackend +from omagent_core.utils.registry import registry +from omagent_core.models.llms.prompt.prompt import PromptTemplate +from omagent_core.engine.worker.base import BaseWorker +from omagent_core.models.llms.prompt.parser import StrParser +from omagent_core.models.llms.openai_gpt import OpenaiGPTLLM +from omagent_core.utils.logger import logging + + +CURRENT_PATH = root_path = Path(__file__).parents[0] + + +@registry.register_worker() +class BodyAnalysisDecider(BaseLLMBackend, BaseWorker): + llm: OpenaiGPTLLM + prompts: List[PromptTemplate] = Field( + default=[ + PromptTemplate.from_file( + CURRENT_PATH.joinpath("sys_prompt_en.prompt"), role="system" + ), + PromptTemplate.from_file( + CURRENT_PATH.joinpath("user_prompt_en.prompt"), role="user" + ), + ] + ) + + def _run(self, *args, **kwargs): + + # Retrieve conversation context from memory, initializing empty if not present + if self.stm(self.workflow_instance_id).get("user_body_data"): + user_body_data = self.stm(self.workflow_instance_id).get("user_body_data") + else: + user_body_data = [] + + if self.stm(self.workflow_instance_id).get("search_info"): + search_info = self.stm(self.workflow_instance_id).get("search_info") + else: + search_info = [] + + if 
self.stm(self.workflow_instance_id).get("feedback"): + feedback = self.stm(self.workflow_instance_id).get("feedback") + else: + feedback = [] + + # Query LLM to analyze available information + chat_complete_res = self.simple_infer( + bodydata=str(user_body_data), + previous_search=str(search_info), + feedback=str(feedback) + ) + content = chat_complete_res["choices"][0]["message"].get("content") + content = self._extract_from_result(content) + logging.info(content) + + # Return decision and handle feedback if more information is needed + if content.get("decision") == "ready": + return {"decision": True} + elif content.get("reason"): + feedback.append(content["reason"]) + self.stm(self.workflow_instance_id)["feedback"] = feedback + return {"decision": False} + else: + raise ValueError("LLM generation is not valid.") + + + def _extract_from_result(self, result: str) -> dict: + try: + pattern = r"```json\s+(.*?)\s+```" + match = re.search(pattern, result, re.DOTALL) + if match: + return json_repair.loads(match.group(1)) + else: + return json_repair.loads(result) + except Exception as error: + raise ValueError("LLM generation is not valid.") \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_en.prompt new file mode 100644 index 0000000..25c14be --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_en.prompt @@ -0,0 +1,36 @@ +You are a helpful fitness personal trainer responsible for collecting information to help users analyze their physical condition based on international health indicators. + +You will receive: +- Information about the user's health indicators +- Information from previous searches +- Feedback from the body data analysis decision maker on what other user information is needed to complete the analysis of the physical health state + +Your task is to analyze all the information provided and determine if enough details have been gathered to generate a good physical health status analysis. + +Please consider gathering information about the user's physical condition from the information required for the following health indicators. +1. BMI. +2. body fat percentage. +3. muscle mass. +4. visceral fat grade. +5. obesity level. +6. basal metabolic level. +7. waist-to-hip ratio +8. other health indicators that should be analyzed. + +You should respond in this format: +{ + "decision": "ready" or "need_more_info", + "reason": "If need_more_info, explain what specific information is still missing and why it's important. If ready, no explaination need to provide." +} + +First and foremost, carefully analyze the user's instruction. If the user explicitly states they want an immediate recommendation or indicates they don't want to answer more questions, you should return "ready" regardless of missing information. + +When evaluating whether there is enough information (only if the user has not asked for an immediate recommendation), consider the following: +1. Do you know the gender of the user? +2. Do you know the user's age? +3. Is the user's height known? +4. Is the user's weight known? +5. Are you aware of the indicators and formulas used for health analysis? +6. Do you know any other specific information that is needed to analyze your health status? + +Your response must be in valid JSON format. Be specific in your reasoning about what information is missing or why the collected information is sufficient. 
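Both the decider and the QA worker expect the LLM to answer with a JSON object, optionally wrapped in a fenced `json` block, and parse it with the regex-plus-json_repair fallback shown in `_extract_from_result` above. A small standalone sketch of that parsing step (the sample reply string is purely illustrative):

```python
import re

import json_repair  # same dependency the workers use


def extract_json(result: str) -> dict:
    """Mirror of _extract_from_result: prefer a fenced json block, else parse the whole string."""
    match = re.search(r"```json\s+(.*?)\s+```", result, re.DOTALL)
    return json_repair.loads(match.group(1) if match else result)


# Illustrative LLM reply; json_repair also tolerates mildly malformed JSON.
reply = '```json\n{"decision": "need_more_info", "reason": "The user\'s age is missing."}\n```'
print(extract_json(reply))
# {'decision': 'need_more_info', 'reason': "The user's age is missing."}
```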
diff --git a/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_zh.prompt new file mode 100644 index 0000000..62236b3 --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_decider/sys_prompt_zh.prompt @@ -0,0 +1,38 @@ +你是一名乐于助人的健身私教,负责收集信息,帮助用户基于国际健康指标分析自己的身体状况。 + +请考虑从以下健康指标所需的信息收集用户的身体状况, +1. BMI, +2. 体脂率, +3. 肌肉率, +4. 内脏脂肪等级, +5. 肥胖度, +6. 基础代谢水平, +7. 腰臀比 +8. 其他应该分析的健康指标。 + + +您将接收 +- 用户的身体指标信息 +- 之前搜索过的信息 +- 来自身体数据分析决定者的反馈,说明还需要哪些其他用户信息来完成身体健康状态分析 + +您的任务是分析所有提供的信息,并判断是否已经收集到足够的细节来生成一个好的身体健康状态分析。 + +您应该按照以下格式进行回复: +{ + "decision": "ready" 或 "need_more_info"、 + "reason": "如果需要更多信息("need_more_info"),请解释还缺少哪些具体信息,以及为什么这些信息很重要。如果准备就绪("ready"),则无需解释。" +} + +首先要仔细分析用户当前的信息。如果用户明确表示他们想要立即得到分析,或者表示他们不想回答更多问题,那么无论是否缺少信息,都应该返回"ready"。 + +在评估是否有足够的信息时(仅当用户没有要求立即推荐时),请考虑以下几点: +1. 您知道用户的性别吗? +2. 是否了解用户年龄? +3. 是否了解用户身高? +4. 是否知晓了用户体重? +5. 是否了解健康分析的指标以及计算公式 +6. 是否了解其他需要用于身体健康状态分析的具体信息 + + +您的回复必须是有效的 JSON 格式。请具体说明缺少哪些信息或已收集的信息为何足够。 \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_en.prompt new file mode 100644 index 0000000..445c15e --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_en.prompt @@ -0,0 +1,7 @@ +现在,轮到你来完成任务了。 + +输入信息: +- 用户身体指标信息: {{bodydata}} +- 之前搜索过的信息:{{previous_search}} +- 决定者的反馈: {{feedback}} + diff --git a/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_zh.prompt new file mode 100644 index 0000000..bdf786a --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_decider/user_prompt_zh.prompt @@ -0,0 +1,7 @@ +Now, it's your turn to complete the task. 
+ +Enter the information: +- Information about the user's body metrics: {{bodydata}} +- Previous searches: {{previous_search}} +- Decider feedback: {{feedback}} + diff --git a/examples/body_data_analysis/agent/body_analysis_qa/__init__.py b/examples/body_data_analysis/agent/body_analysis_qa/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/body_data_analysis/agent/body_analysis_qa/body_analysis_qa.py b/examples/body_data_analysis/agent/body_analysis_qa/body_analysis_qa.py new file mode 100644 index 0000000..1321f73 --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_qa/body_analysis_qa.py @@ -0,0 +1,126 @@ +import json_repair +import re +from pathlib import Path +from typing import List + +from pydantic import Field + +from omagent_core.utils.registry import registry +from omagent_core.models.llms.base import BaseLLMBackend +from omagent_core.models.llms.prompt.prompt import PromptTemplate +from omagent_core.tool_system.manager import ToolManager +from omagent_core.engine.worker.base import BaseWorker +from omagent_core.models.llms.prompt.parser import StrParser +from omagent_core.models.llms.openai_gpt import OpenaiGPTLLM +from omagent_core.utils.logger import logging + +CURRENT_PATH = Path(__file__).parents[0] + +@registry.register_worker() +class BodyAnalysisQA(BaseLLMBackend, BaseWorker): + + llm: OpenaiGPTLLM + prompts: List[PromptTemplate] = Field( + default=[ + PromptTemplate.from_file( + CURRENT_PATH.joinpath("sys_prompt_en.prompt"), role="system" + ), + PromptTemplate.from_file( + CURRENT_PATH.joinpath("user_prompt_en.prompt"), role="user" + ), + ] + ) + tool_manager: ToolManager + + def _run(self, *args, **kwargs): + + + # Retrieve conversation context from memory, initializing empty if not present + if self.stm(self.workflow_instance_id).get("user_body_data"): + if isinstance(self.stm(self.workflow_instance_id).get("user_body_data"),str): + user_body_data = [self.stm(self.workflow_instance_id).get("user_body_data")] + else: + user_body_data = self.stm(self.workflow_instance_id).get("user_body_data") + else: + user_body_data = [] + + # if self.stm(self.workflow_instance_id).get("other_body_info"): + # other_body_info = self.stm(self.workflow_instance_id).get("other_body_info") + # else: + # other_body_info = [] + + if self.stm(self.workflow_instance_id).get("search_info"): + search_info = self.stm(self.workflow_instance_id).get("search_info") + else: + search_info = [] + + if self.stm(self.workflow_instance_id).get("feedback"): + feedback = self.stm(self.workflow_instance_id).get("feedback") + else: + feedback = [] + + # Log current conversation state for debugging + chat_structure = { + "user_body_data": user_body_data, + # "other_body_info": other_body_info, + "search_info": search_info, + "feedback": feedback + } + logging.info(chat_structure) + + # Generate next conversation action using LLM + chat_complete_res = self.simple_infer( + bodydata=str(user_body_data), + previous_search=str(search_info), + feedback=str(feedback) + ) + content = chat_complete_res["choices"][0]["message"].get("content") + content = self._extract_from_result(content) + + # Handle follow-up question flow + if content.get("conversation"): + question = content["conversation"] + #self.callback.send_block(self.workflow_instance_id, msg=question) + user_input = self.input.read_input(workflow_instance_id=self.workflow_instance_id, input_prompt=question+'\n') + content = user_input['messages'][-1]['content'] + for content_item in content: + if content_item['type'] 
== 'text': + answer = content_item['data'] + + # Store Q&A exchange in conversation history + user_body_data.append("Question: "+question+"\n"+"Answer: "+answer) + + self.stm(self.workflow_instance_id)["user_body_data"] = user_body_data + return + + # Handle web search flow for gathering contextual info + elif content.get("tool_call"): + self.callback.info(self.workflow_instance_id, progress='Body analysis QA', message='Search for information') + print('----------------------------------------') + print(content["tool_call"]) + execution_status, execution_results = self.tool_manager.execute_task( + content["tool_call"]+'\nYou should use web search to complete this task.' + ) + if execution_status == "success": + search_info.append(str(execution_results)) + self.stm(self.workflow_instance_id)["search_info"] = search_info + feedback.append(f'The information of \'{content["tool_call"]}\' is provided detailly and specifically, and satisfied the requirement, dont need to ask for more information.') + self.stm(self.workflow_instance_id)["feedback"] = feedback + return + else: + raise ValueError("Web search tool execution failed.") + + else: + raise ValueError("LLM generation is not valid.") + + + def _extract_from_result(self, result: str) -> dict: + try: + pattern = r"```json\s+(.*?)\s+```" + match = re.search(pattern, result, re.DOTALL) + if match: + return json_repair.loads(match.group(1)) + else: + return json_repair.loads(result) + except Exception as error: + raise ValueError("LLM generation is not valid.") \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_en.prompt new file mode 100644 index 0000000..2b5da25 --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_en.prompt @@ -0,0 +1,47 @@ +You are a helpful fitness personal trainer responsible for collecting information to help users analyze their physical condition based on international health indicators. + +You will receive: +- Information about the user's health indicators +- Information from previous searches +- Feedback from the body data analysis decision maker on what other user information is needed to complete the analysis of the physical health state + +Your role is to collect the complete information through an iterative process. Each time you receive input, you should focus on obtaining a specific piece of missing information based on the following: +- Information about the user's initial body metrics +- Other user information that has been collected +- Feedback from the decision maker on what information is still missing + +Please consider gathering information about the user's physical condition from the information required for the following health indicators. +1. BMI. +2. body fat percentage. +3. muscle mass. +4. visceral fat grade. +5. obesity level. +6. basal metabolic level. +7. waist-to-hip ratio +8. other health indicators that should be analyzed. + +When choosing which information to ask for next, prioritize the most important missing details rather than asking multiple questions at once. This helps keep the conversation focused and makes it easier for users to provide clear answers. Keep track of the information you have already asked for to avoid repeating questions on the same topic. + +1. 
If you need more specific information from a user about a particular aspect in order to provide a good analysis of the physical condition, respond with a polite conversational question in the following format:
+{
+    "conversation": ""
+}
+
+2. If you need to search for external information, such as basal metabolism formulas or other health metrics and their formulas, reply in the following format:
+{
+    "tool_call": ""
+}
+
+Note: You should only use tool_call if you really need an additional search to obtain external information; otherwise use conversation. When you use tool_call, you should say "Searching for ...".
+
+Your response must be in valid JSON format and contain either "conversation" or "tool_call".
+When deciding what information is needed, consider:
+- The user's gender
+- The user's age
+- The user's height
+- The user's weight
+- Any other specific details that would be helpful in analyzing physical health status
+
+Keep track of which topics you've already asked about and do not repeat questions about the same topic. If you've already asked about gender, don't ask about gender again.
+Focus on gathering new information about different aspects of the body analysis requirements, always maintaining a polite and friendly tone.
diff --git a/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_zh.prompt
new file mode 100644
index 0000000..e54c2f6
--- /dev/null
+++ b/examples/body_data_analysis/agent/body_analysis_qa/sys_prompt_zh.prompt
@@ -0,0 +1,48 @@
+你是一名乐于助人的健身私教,负责收集信息,帮助用户基于国际健康指标分析自己的身体状况。
+
+请考虑从以下健康指标所需的信息收集用户的身体状况,
+1. BMI,
+2. 体脂率,
+3. 肌肉率,
+4. 内脏脂肪等级,
+5. 肥胖度,
+6. 基础代谢水平,
+7. 腰臀比
+8. 其他应该分析的健康指标。
+
+
+您将接收
+- 用户的身体指标信息
+- 之前搜索过的信息
+- 来自身体数据分析决定者的反馈,说明还需要哪些其他用户信息来完成身体健康状态分析
+
+你的职责是通过迭代过程收集完整的信息。每次接收到输入信息时,你都应基于以下几点,集中精力获取一条特定的缺失信息:
+- 用户的初始身体指标信息
+- 已收集的其他用户信息
+- 决定者关于还缺少哪些信息的反馈意见
+
+在选择下一步询问哪些信息时,优先考虑最重要的缺失细节,而不是同时询问多个问题。这有助于保持对话的重点,让用户更容易提供清晰的答案。跟踪您已经询问过的信息,以避免就同一主题重复提问。
+
+1. 如果你需要用户提供更多关于某一方面的具体信息,以便提供一个好的身体状况分析,请用以下格式的礼貌对话问题进行回复:
+{
+    "conversation": ""
+}
+
+2. 如果您需要搜索外部信息,比如搜索基础代谢计算公式、其他健康指标及其计算公式,请按以下格式回复:
+{
+    "tool_call": ""
+}
+
+注意:只有在确实需要额外的搜索去获取外部信息时才能使用tool_call,否则请使用conversation。
+并且当您使用tool_call时,您应该说"搜索 ..."
+
+您的回复必须是有效的 JSON 格式,并包含: "conversation" 或 "tool_call"。
+在决定需要哪些信息时,请考虑:
+- 用户性别
+- 用户年龄
+- 用户身高
+- 用户体重
+- 任何其他有助于身体健康状态分析的具体细节
+
+记住您已经询问过的话题,不要重复询问同一话题。如果您已经问过性别,就不要再问性别的问题。
+重点收集有助于身体状况分析不同方面的新信息,始终保持礼貌和友好的语气。
\ No newline at end of file
diff --git a/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_en.prompt b/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_en.prompt
new file mode 100644
index 0000000..9338a92
--- /dev/null
+++ b/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_en.prompt
@@ -0,0 +1,8 @@
+Now, it's your turn to complete the task.
+
+Input information:
+- Information about the user's body metrics: {{bodydata}}
+- Previous searches: {{previous_search}}
+- Decider feedback: {{feedback}}
+
+Your response must be in a valid JSON format and contain either "conversation" or "tool_call".
\ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_zh.prompt b/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_zh.prompt new file mode 100644 index 0000000..5057c3e --- /dev/null +++ b/examples/body_data_analysis/agent/body_analysis_qa/user_prompt_zh.prompt @@ -0,0 +1,8 @@ +现在,轮到你来完成任务了。 + +输入信息: +- 用户身体指标信息: {{bodydata}} +- 之前搜索过的信息:{{previous_search}} +- 决定者的反馈: {{feedback}} + +您的回复必须是有效的 JSON 格式,并包含以: "conversation" 或者 "tool_call"。 \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_data_acquisition/__init__.py b/examples/body_data_analysis/agent/body_data_acquisition/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/body_data_analysis/agent/body_data_acquisition/body_data.json b/examples/body_data_analysis/agent/body_data_acquisition/body_data.json new file mode 100644 index 0000000..b99ed89 --- /dev/null +++ b/examples/body_data_analysis/agent/body_data_acquisition/body_data.json @@ -0,0 +1,13 @@ +{ + "Body_Data": { + "Gender": "Male", + "Age": "25", + "Height": "185cm", + "Weight": "95.85kg", + "Fat": "23.85kg", + "Striated_Muscle": "38.63kg", + "Muscle": "67.5kg", + "Bone": "5.2kg", + "Visceral_Fat_Grade": "9" + } +} \ No newline at end of file diff --git a/examples/body_data_analysis/agent/body_data_acquisition/body_data_acquisiton.py b/examples/body_data_analysis/agent/body_data_acquisition/body_data_acquisiton.py new file mode 100644 index 0000000..acd86ca --- /dev/null +++ b/examples/body_data_analysis/agent/body_data_acquisition/body_data_acquisiton.py @@ -0,0 +1,89 @@ +from pathlib import Path +import json +from omagent_core.utils.registry import registry +from omagent_core.engine.worker.base import BaseWorker +from omagent_core.utils.general import read_image + +CURRENT_PATH = root_path = Path(__file__).parents[0] + +def load_json(json_path: str): + with open(json_path, 'r', encoding='utf-8') as f: + res = json.load(f) + return res + +def get_body_data(body_data, language = 'en'): + # 提取数据 + gender = body_data.get("Gender", "未知") + age = body_data.get("Age", "未知") + height = body_data.get("Height", "未知") + weight = body_data.get("Weight", "未知") + fat = body_data.get("Fat", "未知") + striated_muscle = body_data.get("Striated_Muscle", "未知") + muscle = body_data.get("Muscle", "未知") + bone = body_data.get("Bone", "未知") + visceral_fat_grade = body_data.get("Visceral_Fat_Grade", "未知") + + # 构建字符串 + body_data_string_zh = ( + f"性别: {gender}, " + f"年龄: {age}, " + f"身高: {height}, " + f"体重: {weight}, " + f"脂肪: {fat}, " + f"骨骼肌: {striated_muscle}, " + f"肌肉: {muscle}, " + f"骨骼: {bone}, " + f"内脏脂肪等级: {visceral_fat_grade}" + ) + + body_data_string_en = ( + f"Gender: {gender}, " + f"Age: {age}, " + f"Height: {height}, " + f"Weight: {weight}, " + f"Fat: {fat}, " + f"Striated_Muscle: {striated_muscle}, " + f"Muscle: {muscle}, " + f"Bone: {bone}, " + f"Visceral_Fat_Grade: {visceral_fat_grade}" + ) + if language == 'zh': + return body_data_string_zh + else: + return body_data_string_en + +@registry.register_worker() +class BodyDataAcquisition(BaseWorker): + """ + Body data input processor for processing the user's body data (future support for connecting to a body fat scale to access this data directly). + + The processor allows user body data and provides relevant analysis and recommendations around that data. 
+ + Body data, which currently exists locally in the form of a file agent is automatically fetched, + (with future support for connecting to a body fat scale to fetch this data directly) + this data will be read and cached into the workflow's short term memory (or long term) for use by the downstream processor. + + """ + + def _run(self, *args, **kwargs): + """ + """ + try: + body_data = load_json(CURRENT_PATH.joinpath("body_data.json")) + + body_data_string = get_body_data(body_data['Body_Data']) + # img = read_image(input_source=image_path) + # image_cache = {'' : img} + # self.stm(self.workflow_instance_id)['user_body_data'] = {"user_body_data": body_data_string} + self.stm(self.workflow_instance_id)['user_body_data'] = body_data_string + print(self.stm(self.workflow_instance_id)['user_body_data']) + except Exception as e: + print(CURRENT_PATH.joinpath("body_data.json")) + print(e) + pass + + return + + + + diff --git a/examples/body_data_analysis/agent/body_data_acquisition/body_data_test.txt b/examples/body_data_analysis/agent/body_data_acquisition/body_data_test.txt new file mode 100644 index 0000000..8d1c216 --- /dev/null +++ b/examples/body_data_analysis/agent/body_data_acquisition/body_data_test.txt @@ -0,0 +1,109 @@ +性别:男 +年龄:24 +身高:185cm +体重:95.85kg + +---------------------- +肌肉脂肪分析 +脂肪:23.85kg +骨骼肌:38.63kg +肌肉量:67.5kg +分析: +肌肉量保持在不错的水平,但体重和脂肪是高标准 +建议: +需要注意在平时减脂的时候保持增肌 +---------------------------- +人体成分: +水分:52.53L, 正常 +蛋白质:14.57kg, 正常 +骨量:5.2kg, 正常 +脂肪23.85kg, 过量 + +------------------------------ + +肥胖分析: +BMI:28.3,超体重,中国指标 +全身体脂率:24.6 +内脏脂肪等级:9 +肥胖度:129 + +--------------------- +体重控制 +目标体重:85kg +体重控制:-10.8kg +脂肪控制:-10.8kg +肌肉控制:0 +健康评估:76.1 + +-------------------- +肌肉均衡 +左上肢: +与标准比:106.5% +肌肉量:4.13kg + +右上肢: +与标准比:116.05% +肌肉量:4.5kg + +躯干: +与标准比:101.47% +肌肉量:32.42kg + +左下肢: +与标准比:96.18% +肌肉量:10.84kg + +右下肢: +与标准比:96.04% +肌肉量:10.82kg + +肌肉均衡 +上肢:重度不均 +下肢:均衡 +上下肢:轻度不均 + +身体力量 +上肢:正常 +下肢:正常 +肌肉:发达 + +------------------- +节段脂肪 +左上肢: +与标准比:261.49% +肌肉量:1.73kg + +右上肢: +与标准比:201.47% +肌肉量:1.33kg + +躯干: +与标准比:303.9% +肌肉量:14.28kg + +左下肢: +与标准比:130.57% +肌肉量:2.51kg + +右下肢: +与标准比:132.46% +肌肉量:2.54kg + +-------------------- +测量报告 +皮下脂肪率:21.7 +皮下脂肪率翻译了皮下脂肪的厚度 + +基础代谢:1931 +当前体重下基础代谢标准值为:1952-2306 + +去脂体重:72.3 +去脂体重指的是除脂肪以为的圣体其他成分的重量,肌肉是其中的主要部分 + +肌肉率:70.4 + +体年龄:25 + +脂肪肝风险等级:I级 +存在换上脂肪肝的风险 + diff --git a/examples/body_data_analysis/agent/body_data_acquisition/test.ipynb b/examples/body_data_analysis/agent/body_data_acquisition/test.ipynb new file mode 100644 index 0000000..107ca94 --- /dev/null +++ b/examples/body_data_analysis/agent/body_data_acquisition/test.ipynb @@ -0,0 +1,96 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "def load_json(json_path: str):\n", + " with open(json_path, 'r', encoding='utf-8') as f:\n", + " res = json.load(f)\n", + " return res\n", + "\n", + "def get_body_data(body_data):\n", + " # 提取数据\n", + " weight = body_data.get(\"Weight\", \"未知\")\n", + " fat = body_data.get(\"Fat\", \"未知\")\n", + " striated_muscle = body_data.get(\"Striated_Muscle\", \"未知\")\n", + " muscle = body_data.get(\"Muscle\", \"未知\")\n", + " bone = body_data.get(\"Bone\", \"未知\")\n", + " visceral_fat_grade = body_data.get(\"Visceral_Fat_Grade\", \"未知\")\n", + " \n", + " # 构建字符串\n", + " body_data_string = (\n", + " f\"体重: {weight}, \"\n", + " f\"脂肪: {fat}, \"\n", + " f\"骨骼肌: {striated_muscle}, \"\n", + " f\"肌肉: {muscle}, \"\n", + " f\"骨骼: {bone}, \"\n", + " f\"内脏脂肪等级: {visceral_fat_grade}\"\n", 
+ " )\n", + " \n", + " return body_data_string\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "body_data = load_json('./body_data.json')\n", + "\n", + "body_data_string = get_body_data(body_data['Body_Data'])" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "体重: 95.85kg, 脂肪: 23.85kg, 骨骼肌: 38.63kg, 肌肉: 67.5kg, 骨骼: 5.2kg, 内脏脂肪等级: 9\n" + ] + } + ], + "source": [ + "print(body_data_string)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "OmAgent", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/body_data_analysis/compile_container.py b/examples/body_data_analysis/compile_container.py new file mode 100644 index 0000000..07e66c0 --- /dev/null +++ b/examples/body_data_analysis/compile_container.py @@ -0,0 +1,18 @@ +# Import core modules and components +from omagent_core.utils.container import container +from omagent_core.utils.registry import registry +from pathlib import Path + +# Import all registered modules +registry.import_module() + +CURRENT_PATH = Path(__file__).parents[0] + + +# Register required components +container.register_stm("RedisSTM") +container.register_callback(callback='AppCallback') +container.register_input(input='AppInput') + +# Compile container config +container.compile_config(CURRENT_PATH) diff --git a/examples/body_data_analysis/configs/llms/gpt.yml b/examples/body_data_analysis/configs/llms/gpt.yml new file mode 100644 index 0000000..06efefc --- /dev/null +++ b/examples/body_data_analysis/configs/llms/gpt.yml @@ -0,0 +1,6 @@ +name: OpenaiGPTLLM +model_id: gpt-4o +api_key: ${env| custom_openai_key, openai_api_key} +endpoint: ${env| custom_openai_endpoint, https://api.openai.com/v1} +temperature: 0 +vision: true \ No newline at end of file diff --git a/examples/body_data_analysis/configs/llms/text_res.yml b/examples/body_data_analysis/configs/llms/text_res.yml new file mode 100644 index 0000000..50aef99 --- /dev/null +++ b/examples/body_data_analysis/configs/llms/text_res.yml @@ -0,0 +1,6 @@ +name: OpenaiGPTLLM +model_id: gpt-4o +api_key: ${env| custom_openai_key, openai_api_key} +endpoint: ${env| custom_openai_endpoint, https://api.openai.com/v1} +temperature: 0 +vision: false \ No newline at end of file diff --git a/examples/body_data_analysis/configs/tools/websearch.yml b/examples/body_data_analysis/configs/tools/websearch.yml new file mode 100644 index 0000000..5544864 --- /dev/null +++ b/examples/body_data_analysis/configs/tools/websearch.yml @@ -0,0 +1,5 @@ +llm: ${sub| text_res} +tools: + - name: WebSearch + bing_api_key: ${env| bing_api_key, null} + llm: ${sub|text_res} \ No newline at end of file diff --git a/examples/body_data_analysis/configs/workers/body_analysis.yml b/examples/body_data_analysis/configs/workers/body_analysis.yml new file mode 100644 index 0000000..32760e7 --- /dev/null +++ b/examples/body_data_analysis/configs/workers/body_analysis.yml @@ -0,0 +1,2 @@ +name: BodyAnalysis +llm: ${sub| gpt} \ No newline at end of file 
diff --git a/examples/body_data_analysis/configs/workers/body_analysis_decider.yml b/examples/body_data_analysis/configs/workers/body_analysis_decider.yml new file mode 100644 index 0000000..5de26f2 --- /dev/null +++ b/examples/body_data_analysis/configs/workers/body_analysis_decider.yml @@ -0,0 +1,2 @@ +name: BodyAnalysisDecider +llm: ${sub| gpt} \ No newline at end of file diff --git a/examples/body_data_analysis/configs/workers/body_analysis_qa.yml b/examples/body_data_analysis/configs/workers/body_analysis_qa.yml new file mode 100644 index 0000000..846a698 --- /dev/null +++ b/examples/body_data_analysis/configs/workers/body_analysis_qa.yml @@ -0,0 +1,2 @@ +name: BodyAnalysisQA +llm: ${sub| text_res} \ No newline at end of file diff --git a/examples/body_data_analysis/configs/workers/body_data_acquisition.yml b/examples/body_data_analysis/configs/workers/body_data_acquisition.yml new file mode 100644 index 0000000..6fb7191 --- /dev/null +++ b/examples/body_data_analysis/configs/workers/body_data_acquisition.yml @@ -0,0 +1 @@ +name: BodyDataAcquisition \ No newline at end of file diff --git a/examples/body_data_analysis/container.yaml b/examples/body_data_analysis/container.yaml new file mode 100644 index 0000000..67fe546 --- /dev/null +++ b/examples/body_data_analysis/container.yaml @@ -0,0 +1,84 @@ +conductor_config: + name: Configuration + base_url: + value: http://localhost:8080 + description: The Conductor Server API endpoint + env_var: CONDUCTOR_SERVER_URL + auth_key: + value: null + description: The authorization key + env_var: AUTH_KEY + auth_secret: + value: null + description: The authorization secret + env_var: CONDUCTOR_AUTH_SECRET + auth_token_ttl_min: + value: 45 + description: The authorization token refresh interval in minutes. + env_var: AUTH_TOKEN_TTL_MIN + debug: + value: true + description: Debug mode + env_var: DEBUG +connectors: + redis_stream_client: + name: RedisConnector + host: + value: localhost + env_var: HOST + port: + value: 6380 + env_var: PORT + password: + value: null + env_var: PASSWORD + username: + value: null + env_var: USERNAME + db: + value: 0 + env_var: DB + redis_stm_client: + name: RedisConnector + host: + value: localhost + env_var: HOST + port: + value: 6380 + env_var: PORT + password: + value: null + env_var: PASSWORD + username: + value: null + env_var: USERNAME + db: + value: 0 + env_var: DB +components: + AppCallback: + name: AppCallback + bot_id: + value: '' + env_var: BOT_ID + start_time: + value: 2024-11-29_15:25:10 + env_var: START_TIME + folder_name: + value: ./running_logs/2024-11-29_15:25:10 + env_var: FOLDER_NAME + AppInput: + name: AppInput + DefaultCallback: + name: DefaultCallback + bot_id: + value: '' + env_var: BOT_ID + start_time: + value: 2024-11-29_15:25:10 + env_var: START_TIME + folder_name: + value: ./running_logs/2024-11-29_15:25:10 + env_var: FOLDER_NAME + RedisSTM: + name: RedisSTM diff --git a/examples/body_data_analysis/run_cli.py b/examples/body_data_analysis/run_cli.py new file mode 100644 index 0000000..2af8c5d --- /dev/null +++ b/examples/body_data_analysis/run_cli.py @@ -0,0 +1,67 @@ +# Import core modules for workflow management and configuration +from omagent_core.utils.container import container +from omagent_core.engine.workflow.conductor_workflow import ConductorWorkflow +from omagent_core.engine.workflow.task.simple_task import simple_task +from omagent_core.utils.logger import logging +logging.init_logger("omagent", "omagent", level="INFO") + +# Import registry and CLI client modules +from 
omagent_core.utils.registry import registry +from omagent_core.clients.devices.cli.client import DefaultClient + +from pathlib import Path +CURRENT_PATH = Path(__file__).parents[0] + +# Import and register worker modules from agent directory +registry.import_module(project_path=CURRENT_PATH.joinpath('agent')) + +# Add parent directory to Python path +import sys +import os +sys.path.append(os.path.abspath(CURRENT_PATH.joinpath('../../'))) + +# Import custom outfit image input worker + + + +# Import loop task type for iterative Q&A +from omagent_core.engine.workflow.task.do_while_task import DoWhileTask + + +# Configure Redis storage and load container settings +container.register_stm("RedisSTM") +container.from_config(CURRENT_PATH.joinpath('container.yaml')) + + +# Initialize outfit recommendation workflow +workflow = ConductorWorkflow(name='body_analysis') + +# Define workflow tasks: +# 1. Get uer input + +task1 = simple_task(task_def_name="BodyDataAcquisition", task_reference_name="body_data_acquisition") + +# 2. Ask questions about the outfit +task2 = simple_task(task_def_name='BodyAnalysisQA', task_reference_name='body_analysis_qa') + +# 3. Check if enough information is gathered +task3 = simple_task(task_def_name='BodyAnalysisDecider', task_reference_name='body_analysis_decider') + +# 4. Generate final outfit recommendations +task4 = simple_task(task_def_name='BodyAnalysis', task_reference_name='body_analysis') + +# Create loop that continues Q&A until sufficient information is gathered +# Loop terminates when outfit_decider returns decision=true +bodyanalysis_qa_loop = DoWhileTask(task_ref_name='body_analysis_loop', tasks=[task2, task3], + termination_condition='if ($.body_analysis_decider["decision"] == true){false;} else {true;} ') + +# Define workflow sequence: image input -> Q&A loop -> final recommendation +workflow >> task1 >> bodyanalysis_qa_loop >> task4 + +# Register workflow with conductor server +workflow.register(True) + +# Initialize and start CLI client with workflow and image input worker +config_path = CURRENT_PATH.joinpath('configs') +cli_client = DefaultClient(interactor=workflow, config_path=config_path) +cli_client.start_interactor() diff --git a/omagent-core/src/omagent_core/clients/devices/webpage/client.py b/omagent-core/src/omagent_core/clients/devices/webpage/client.py index 988189c..4c4bb49 100644 --- a/omagent-core/src/omagent_core/clients/devices/webpage/client.py +++ b/omagent-core/src/omagent_core/clients/devices/webpage/client.py @@ -8,11 +8,11 @@ from omagent_core.engine.automator.task_handler import TaskHandler from omagent_core.clients.devices.app.callback import AppCallback from omagent_core.clients.devices.app.input import AppInput -import yaml from omagent_core.utils.container import container from omagent_core.utils.registry import registry from omagent_core.services.connectors.redis import RedisConnector from omagent_core.utils.logger import logging +import html registry.import_module() @@ -35,12 +35,43 @@ def __init__( self._config_path = config_path self._workers = workers self._workflow_instance_id = None + self._custom_css = """ + #OmAgent { + height: 100vh !important; + max-height: calc(100vh - 190px) !important; + overflow-y: auto; + } + + .running-message { + margin: 0; + padding: 2px 4px; + white-space: pre-wrap; + word-wrap: break-word; + font-family: inherit; + } + + /* Remove the background and border of the message box */ + .message-wrap { + background: none !important; + border: none !important; + padding: 0 !important; + margin: 0 
!important; + } + + /* Remove the bubble style of the running message */ + .message:has(.running-message) { + background: none !important; + border: none !important; + padding: 0 !important; + box-shadow: none !important; + } + """ def start_interactor(self): worker_config = build_from_file(self._config_path) self._task_handler_interactor = TaskHandler(worker_config=worker_config, workers=self._workers) self._task_handler_interactor.start_processes() - with gr.Blocks(title="OmAgent", css="#OmAgent { height: 100vh !important; max-height: calc(100vh - 190px) !important; overflow-y: auto; }") as chat_interface: + with gr.Blocks(title="OmAgent", css=self._custom_css) as chat_interface: chatbot = gr.Chatbot( elem_id="OmAgent", bubble_full_width=False, @@ -70,7 +101,7 @@ def start_processor(self): self._task_handler_processor = TaskHandler(worker_config=worker_config, workers=self._workers) self._task_handler_processor.start_processes() - with gr.Blocks(title="OmAgent", css="#OmAgent { height: 100vh !important; max-height: calc(100vh - 190px) !important; overflow-y: auto; }") as chat_interface: + with gr.Blocks(title="OmAgent", css=self._custom_css) as chat_interface: chatbot = gr.Chatbot( elem_id="OmAgent", bubble_full_width=False, @@ -150,9 +181,10 @@ def bot(self, history: list): payload_data = self._get_message_payload(message) if payload_data is None: continue - progress = payload_data.get("progress") - message = payload_data.get("message") - history.append({"role": "assistant", "content": f"`{progress}: {message}`"}) + progress = html.escape(payload_data.get("progress", "")) + message = html.escape(payload_data.get("message", "")) + formatted_message = f'
<pre class="running-message">{progress}: {message}</pre>
' + history.append({"role": "assistant", "content": formatted_message}) yield history container.get_connector('redis_stream_client')._client.xack( @@ -190,12 +222,12 @@ def bot(self, history: list): sleep(1) def processor_bot(self, history: list): - history.append({"role": "assistant", "content": f"`processing...`"}) + history.append({"role": "assistant", "content": f"processing..."}) yield history while True: status = self._processor.get_workflow(workflow_id=self._workflow_instance_id).status if status == 'COMPLETED': - history.append({"role": "assistant", "content": f"`completed`"}) + history.append({"role": "assistant", "content": f"completed"}) yield history self._workflow_instance_id = None break
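The webpage client change above escapes the progress and message strings before embedding them in the chat history. A minimal sketch of the escaping behavior, with made-up sample strings and assuming the `<pre class="running-message">` wrapper introduced here:

```python
import html

progress = "Body analysis QA"
message = 'Search for <b>basal metabolism</b> & "BMR" formulas'

# html.escape neutralizes markup and special characters in worker-produced text
# before it is rendered inside the running-message wrapper.
formatted = f'<pre class="running-message">{html.escape(progress)}: {html.escape(message)}</pre>'
print(formatted)
# <pre class="running-message">Body analysis QA: Search for &lt;b&gt;basal metabolism&lt;/b&gt; &amp; &quot;BMR&quot; formulas</pre>
```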