From cd0f0b9b1b8497a0f69d278a6667d7cc5163aadd Mon Sep 17 00:00:00 2001
From: Ran Li
Date: Thu, 11 Jul 2024 11:09:59 +0800
Subject: [PATCH] feat: docs update

---
 docs/source/customize/client_configuration.rst | 18 +++++++++---------
 docs/source/customize/docker-compose_setup.rst |  8 ++++----
 docs/source/customize/goal_submission.rst      |  2 +-
 .../customize/integrate_thirdparty_agent.rst   |  2 +-
 docs/source/customize/tool_creation.rst        |  4 ++--
 5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/docs/source/customize/client_configuration.rst b/docs/source/customize/client_configuration.rst
index 51251de..2362ca2 100644
--- a/docs/source/customize/client_configuration.rst
+++ b/docs/source/customize/client_configuration.rst
@@ -16,8 +16,8 @@ The server section is responsible for setting up the basic server configurations
 
 .. code-block:: yaml
 
     server:
-      port: SERVER_PORT (e.g. default 7788)
-      hostname: SERVER_IP (e.g. default ioa-server)
+      port: SERVER_PORT # e.g. default 7788
+      hostname: SERVER_IP # e.g. default ioa-server
 
 |
 
@@ -32,11 +32,11 @@ The tool agent section defines the configuration for the tool agent itself and r
       agent_name: tool agent name
       desc: |-
         A description of the tool agent's capabilities.
-      tool_config: configuration file of tools (e.g tools_code_executor.yaml)
+      tool_config: configuration file of tools # e.g. tools_code_executor.yaml
       image_name: react-agent
       container_name: docker container name
       port: The port number on which the agent's Docker container will be exposed.
-      model: The model used by the agent (e.g. gpt-4-1106-preview)
+      model: The model used by the agent # e.g. gpt-4-1106-preview
       max_num_steps: The maximum number of steps the agent can take in its process.
 
@@ -51,12 +51,12 @@ The communication service client section used for communicating and interacting
 
     comm:
      name: The name of the client.
      desc: A description of the communication agent's capabilities.
-      type: The type of the communication agent. (Thing Assistant or Human Assistant)
-      support_nested teams: Indicates whether the agent supports nested teams. (true or false)
+      type: The type of the communication agent. # Thing Assistant or Human Assistant
+      support_nested teams: Indicates whether the agent supports nested teams. # true or false
       max_team_up_attempts: The maximum number of attempts to team up with other agents.
       llm:
-        llm_type: Defines the type of large language model (e.g. openai-chat)
-        model: Specifies the model for the large language model, indicating the version and type of AI model used (e.g., gpt-4-1106-preview)
-        temperature: Controls the randomness of the language model's responses (default value is 0.1)
+        llm_type: Defines the type of large language model # e.g. openai-chat
+        model: Specifies the model for the large language model, indicating the version and type of AI model used # e.g., gpt-4-1106-preview
+        temperature: Controls the randomness of the language model's responses # default value is 0.1
\ No newline at end of file
diff --git a/docs/source/customize/docker-compose_setup.rst b/docs/source/customize/docker-compose_setup.rst
index 1ac03a1..e40ab7c 100644
--- a/docs/source/customize/docker-compose_setup.rst
+++ b/docs/source/customize/docker-compose_setup.rst
@@ -12,8 +12,8 @@ Create your case-specific :code:`your_case.yml` file in the :code:`dockerfiles/c
 
    version: "3"
    service:
-      Name: (e.g. WeizeChen)
-      image: Specifies the Docker image to use for this service (e.g. ioa-client:latest)
+      Name: # e.g. WeizeChen
+      image: Specifies the Docker image to use for this service # e.g. ioa-client:latest
       build:
         context: ../../
         dockerfile: dockerfiles/client.Dockerfile
@@ -27,9 +27,9 @@ Create your case-specific :code:`your_case.yml` file in the :code:`dockerfiles/c
         - ../../configs/client_configs:/app/configs
       environment:
         - OPENAI_API_KEY
-        - CUSTOM_CONFIG=agent configuration file path(e.g. configs/cases/paper_writing/weizechen.yaml)
+        - CUSTOM_CONFIG=agent configuration file path # e.g. configs/cases/paper_writing/weizechen.yaml
       ports:
-        - Maps host_port to container_port, allowing access to the service.(e.g. 5051:5050)
+        - Maps host_port to container_port, allowing access to the service. # e.g. 5050:5050
       depends_on:
         - Server
       stdin_open: true
diff --git a/docs/source/customize/goal_submission.rst b/docs/source/customize/goal_submission.rst
index 4a9d2cc..446a377 100644
--- a/docs/source/customize/goal_submission.rst
+++ b/docs/source/customize/goal_submission.rst
@@ -19,6 +19,6 @@ The full URL :code:`url: "http://127.0.0.1:5050/launch_goal"` is used to send a
         json={
             "goal": goal,
             "max_turns": 20,
-            "team_member_names": [agent_1, agent_2] "(if you have no spcific team members, set it to None)""
+            "team_member_names": [agent_1, agent_2]  # if you have no specific team members, set it to None
         },
     )
\ No newline at end of file
diff --git a/docs/source/customize/integrate_thirdparty_agent.rst b/docs/source/customize/integrate_thirdparty_agent.rst
index 9042f32..9eda453 100644
--- a/docs/source/customize/integrate_thirdparty_agent.rst
+++ b/docs/source/customize/integrate_thirdparty_agent.rst
@@ -25,7 +25,7 @@ Open Interpreter Integration
 
   * The Open Interpreter, located in the :code:`im_client/agents/open_interpreter/open_interpreter_agent.py` script, will be dockerized. This script includes FastAPI POST endpoints, which will be exposed as an HTTP service when started with Uvicorn. When deployed with Docker, these endpoints can be accessed externally.
 
-* **Creating Docker for Open Interpreter**:
+* **Creating Dockerfile for Open Interpreter**:
 
   * Next, create a Dockerfile in the :code:`dockerfiles/tool_agents` directory. This Dockerfile ensures that tool agents like Open Interpreter can be started with Docker, preventing potential environment conflicts with IoA.
 
diff --git a/docs/source/customize/tool_creation.rst b/docs/source/customize/tool_creation.rst
index ac1207c..d1f2031 100644
--- a/docs/source/customize/tool_creation.rst
+++ b/docs/source/customize/tool_creation.rst
@@ -31,8 +31,8 @@ Tool with required parameters
             description: your parameters_1 description
             type: string / number / boolean
             enum: ["It's necessary if your parameter is set by Literal type OR specified parameter"]
-          parameters_2: (It's necessary if there are more than 1 parameter in your function)
-            description: your parameters_1 description
+          parameters_2: # It's necessary if there are more than 1 parameter in your function
+            description: your parameters_2 description
             type: string / number / boolean
          required:
            - parameters_1
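
For quick reference, the snippet below assembles the example values from the client_configuration.rst hunks above into one filled-in client config. It is only an illustrative sketch, not a tested configuration: the agent name, description, and max_team_up_attempts value are invented for illustration, only keys visible in this patch are shown, and the tool agent section is omitted because its wrapper key does not appear in these hunks.

.. code-block:: yaml

    server:
      port: 7788            # default port from the example above
      hostname: ioa-server  # default hostname from the example above

    comm:
      name: WeizeChen                  # illustrative; reuses the name from the docker-compose example
      desc: An assistant that drafts and reviews paper sections.  # illustrative description
      type: Human Assistant            # or Thing Assistant
      support_nested teams: false      # key name copied verbatim from the docs above
      max_team_up_attempts: 5          # illustrative value
      llm:
        llm_type: openai-chat
        model: gpt-4-1106-preview
        temperature: 0.1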