diff --git a/.github/workflows/deploy-website.yml b/.github/workflows/deploy-website.yml
index e576ca1b4db..4770c8d0d0d 100644
--- a/.github/workflows/deploy-website.yml
+++ b/.github/workflows/deploy-website.yml
@@ -3,23 +3,14 @@ name: docs
on:
pull_request:
branches: ["0.2"]
- paths:
- - "autogen/*"
- - "website/*"
- - ".github/workflows/deploy-website.yml"
push:
branches: ["0.2"]
- paths:
- - "autogen/*"
- - "website/*"
- - ".github/workflows/deploy-website.yml"
workflow_dispatch:
permissions:
id-token: write
pages: write
jobs:
checks:
- if: github.event_name != 'push'
runs-on: ubuntu-latest
defaults:
run:
diff --git a/LICENSE-CODE-KUBERNETES b/LICENSE-CODE-KUBERNETES
new file mode 100644
index 00000000000..45cadb395ec
--- /dev/null
+++ b/LICENSE-CODE-KUBERNETES
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index 0599300c539..86118d4c46a 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
-
+
![Python Version](https://img.shields.io/badge/3.8%20%7C%203.9%20%7C%203.10%20%7C%203.11%20%7C%203.12-blue) [![PyPI - Version](https://img.shields.io/pypi/v/autogen-agentchat)](https://pypi.org/project/autogen-agentchat/)
[![NuGet version](https://badge.fury.io/nu/AutoGen.Core.svg)](https://badge.fury.io/nu/AutoGen.Core)
diff --git a/autogen/agentchat/contrib/agent_builder.py b/autogen/agentchat/contrib/agent_builder.py
index 430017d13fc..7eaec3eef74 100644
--- a/autogen/agentchat/contrib/agent_builder.py
+++ b/autogen/agentchat/contrib/agent_builder.py
@@ -172,6 +172,26 @@ class AgentBuilder:
```
"""
+ AGENT_FUNCTION_MAP_PROMPT = """Consider the following function.
+ Function Name: {function_name}
+ Function Description: {function_description}
+
+ The agent details are given in the format: {format_agent_details}
+
+ Which one of the following agents should be able to execute this function, preferably an agent with a programming background?
+ {agent_details}
+
+ Hint:
+ # Only respond with the name of the agent that is most suited to execute the function and nothing else.
+ """
+
+ UPDATED_AGENT_SYSTEM_MESSAGE = """
+ {agent_system_message}
+
+ You have access to execute the function: {function_name}.
+ With the following description: {function_description}
+ """
+
def __init__(
self,
config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
@@ -358,6 +378,7 @@ def build(
self,
building_task: str,
default_llm_config: Dict,
+ list_of_functions: Optional[List[Dict]] = None,
coding: Optional[bool] = None,
code_execution_config: Optional[Dict] = None,
use_oai_assistant: Optional[bool] = False,
@@ -373,6 +394,7 @@ def build(
coding: use to identify if the user proxy (a code interpreter) should be added.
code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
+ list_of_functions: list of functions to be associated with agents.
use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
user_proxy: user proxy's class that can be used to replace the default user proxy.
@@ -480,8 +502,9 @@ def build(
"code_execution_config": code_execution_config,
}
)
+
_config_check(self.cached_configs)
- return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
+ return self._build_agents(use_oai_assistant, list_of_functions, user_proxy=user_proxy, **kwargs)
def build_from_library(
self,
@@ -653,13 +676,18 @@ def build_from_library(
return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
def _build_agents(
- self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[autogen.ConversableAgent] = None, **kwargs
+ self,
+ use_oai_assistant: Optional[bool] = False,
+ list_of_functions: Optional[List[Dict]] = None,
+ user_proxy: Optional[autogen.ConversableAgent] = None,
+ **kwargs,
) -> Tuple[List[autogen.ConversableAgent], Dict]:
"""
Build agents with generated configs.
Args:
use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+ list_of_functions: list of functions to be associated with agents.
user_proxy: user proxy's class that can be used to replace the default user proxy.
Returns:
@@ -695,6 +723,53 @@ def _build_agents(
)
agent_list = agent_list + [user_proxy]
+ agent_details = []
+
+ for agent in agent_list[:-1]:
+ agent_details.append({"name": agent.name, "description": agent.description})
+
+ if list_of_functions:
+ for func in list_of_functions:
+ resp = (
+ self.builder_model.create(
+ messages=[
+ {
+ "role": "user",
+ "content": self.AGENT_FUNCTION_MAP_PROMPT.format(
+ function_name=func["name"],
+ function_description=func["description"],
+ format_agent_details='[{"name": "agent_name", "description": "agent description"}, ...]',
+ agent_details=str(json.dumps(agent_details)),
+ ),
+ }
+ ]
+ )
+ .choices[0]
+ .message.content
+ )
+
+ autogen.agentchat.register_function(
+ func["function"],
+ caller=self.agent_procs_assign[resp][0],
+ executor=agent_list[0],
+ name=func["name"],
+ description=func["description"],
+ )
+
+ agents_current_system_message = [
+ agent["system_message"] for agent in agent_configs if agent["name"] == resp
+ ][0]
+
+ self.agent_procs_assign[resp][0].update_system_message(
+ self.UPDATED_AGENT_SYSTEM_MESSAGE.format(
+ agent_system_message=agents_current_system_message,
+ function_name=func["name"],
+ function_description=func["description"],
+ )
+ )
+
+ print(f"Function {func['name']} is registered to agent {resp}.")
+
return agent_list, self.cached_configs.copy()
def save(self, filepath: Optional[str] = None) -> str:
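For context, a minimal sketch of how the new `list_of_functions` parameter might be called. The task text, the helper function, and the LLM config below are illustrative assumptions, not part of this diff; the dict keys ("name", "description", "function") follow the keys read by `_build_agents` above.

```python
from autogen.agentchat.contrib.agent_builder import AgentBuilder


def get_stock_price(symbol: str) -> float:
    """Hypothetical helper, used only for illustration."""
    return 42.0


builder = AgentBuilder(config_file_or_env="OAI_CONFIG_LIST")
agent_list, agent_configs = builder.build(
    building_task="Analyze recent stock market trends.",  # assumed task
    default_llm_config={"temperature": 0},                # assumed config
    list_of_functions=[
        {
            "name": "get_stock_price",
            "description": "Return the latest price for a ticker symbol.",
            "function": get_stock_price,  # the callable to register
        }
    ],
)
```

For each entry, the builder asks its LLM (via `AGENT_FUNCTION_MAP_PROMPT`) which agent should own the function, registers the callable with that agent as caller and the user proxy as executor, and appends the function's name and description to the chosen agent's system message.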
diff --git a/autogen/agentchat/contrib/vectordb/couchbase.py b/autogen/agentchat/contrib/vectordb/couchbase.py
index 66691fa2f2b..ae978ef126b 100644
--- a/autogen/agentchat/contrib/vectordb/couchbase.py
+++ b/autogen/agentchat/contrib/vectordb/couchbase.py
@@ -56,16 +56,7 @@ def __init__(
wait_until_index_ready (float | None): Blocking call to wait until the database indexes are ready. None means no wait. Default is None.
wait_until_document_ready (float | None): Blocking call to wait until the database documents are ready. None means no wait. Default is None.
"""
- print(
- "CouchbaseVectorDB",
- connection_string,
- username,
- password,
- bucket_name,
- scope_name,
- collection_name,
- index_name,
- )
+
self.embedding_function = embedding_function
self.index_name = index_name
@@ -119,6 +110,7 @@ def create_collection(
try:
collection_mgr = self.bucket.collections()
collection_mgr.create_collection(self.scope.name, collection_name)
+ self.cluster.query(f"CREATE PRIMARY INDEX ON {self.bucket.name}.{self.scope.name}.{collection_name}")
except Exception:
if not get_or_create:
@@ -287,7 +279,12 @@ def upsert_docs(
[doc["content"]]
).tolist() # Gets new embedding even in case of document update
- doc_content = {TEXT_KEY: doc["content"], "metadata": doc.get("metadata", {}), EMBEDDING_KEY: embedding}
+ doc_content = {
+ TEXT_KEY: doc["content"],
+ "metadata": doc.get("metadata", {}),
+ EMBEDDING_KEY: embedding,
+ "id": doc_id,
+ }
docs_to_upsert[doc_id] = doc_content
collection.upsert_multi(docs_to_upsert)
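Two effects of this hunk are worth noting: `create_collection` now also issues a `CREATE PRIMARY INDEX` so the new collection is immediately queryable with N1QL/SQL++, and `upsert_docs` duplicates the document id into the stored body. A sketch of the resulting document shape, assuming the module's `TEXT_KEY` and `EMBEDDING_KEY` constants resolve to "content" and "embedding" (an assumption; check the module's definitions):

```python
# Illustrative only; mirrors the doc_content dict built in upsert_docs above.
doc_id = "doc_0"  # hypothetical id
doc_content = {
    "content": "some chunk of text",       # TEXT_KEY: the raw document text
    "metadata": {"source": "example.md"},  # optional caller-supplied metadata
    "embedding": [0.12, -0.03, 0.87],      # EMBEDDING_KEY: vector from embedding_function
    "id": doc_id,                          # new: id stored in the body as well as the key
}
```

Storing the id inside the body lets N1QL queries filter on it directly (e.g. `SELECT * FROM bucket.scope.collection WHERE id = "doc_0"`), which the new primary index makes possible.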
diff --git a/autogen/coding/kubernetes/__init__.py b/autogen/coding/kubernetes/__init__.py
new file mode 100644
index 00000000000..3129ec86bf3
--- /dev/null
+++ b/autogen/coding/kubernetes/__init__.py
@@ -0,0 +1,5 @@
+from .pod_commandline_code_executor import PodCommandLineCodeExecutor
+
+__all__ = [
+ "PodCommandLineCodeExecutor",
+]
diff --git a/autogen/coding/kubernetes/pod_commandline_code_executor.py b/autogen/coding/kubernetes/pod_commandline_code_executor.py
new file mode 100644
index 00000000000..a232a392271
--- /dev/null
+++ b/autogen/coding/kubernetes/pod_commandline_code_executor.py
@@ -0,0 +1,323 @@
+from __future__ import annotations
+
+import atexit
+import importlib
+import sys
+import textwrap
+import uuid
+from hashlib import md5
+from pathlib import Path
+from time import sleep
+from types import TracebackType
+from typing import Any, ClassVar, Dict, List, Optional, Type, Union
+
+client = importlib.import_module("kubernetes.client")
+config = importlib.import_module("kubernetes.config")
+ApiException = importlib.import_module("kubernetes.client.rest").ApiException
+stream = importlib.import_module("kubernetes.stream").stream
+
+from ...code_utils import TIMEOUT_MSG, _cmd
+from ..base import CodeBlock, CodeExecutor, CodeExtractor, CommandLineCodeResult
+from ..markdown_code_extractor import MarkdownCodeExtractor
+from ..utils import _get_file_name_from_content, silence_pip
+
+if sys.version_info >= (3, 11):
+ from typing import Self
+else:
+ from typing_extensions import Self
+
+
+class PodCommandLineCodeExecutor(CodeExecutor):
+ DEFAULT_EXECUTION_POLICY: ClassVar[Dict[str, bool]] = {
+ "bash": True,
+ "shell": True,
+ "sh": True,
+ "pwsh": False,
+ "powershell": False,
+ "ps1": False,
+ "python": True,
+ "javascript": False,
+ "html": False,
+ "css": False,
+ }
+ LANGUAGE_ALIASES: ClassVar[Dict[str, str]] = {
+ "py": "python",
+ "js": "javascript",
+ }
+ LANGUAGE_FILE_EXTENSION: ClassVar[Dict[str, str]] = {
+ "python": "py",
+ "javascript": "js",
+ "bash": "sh",
+ "shell": "sh",
+ "sh": "sh",
+ }
+
+ def __init__(
+ self,
+ image: str = "python:3-slim",
+ pod_name: Optional[str] = None,
+ namespace: Optional[str] = None,
+ pod_spec: Optional[client.V1Pod] = None, # type: ignore
+ container_name: Optional[str] = "autogen-code-exec",
+ timeout: int = 60,
+ work_dir: Union[Path, str] = Path("/workspace"),
+ kube_config_file: Optional[str] = None,
+ stop_container: bool = True,
+ execution_policies: Optional[Dict[str, bool]] = None,
+ ):
+ """(Experimental) A code executor class that executes code through
+ a command line environment in a kubernetes pod.
+
+ The executor first saves each code block in a file in the working
+ directory, and then executes the code file in the container.
+ The executor executes the code blocks in the order they are received.
+ Currently, the executor only supports Python and shell scripts.
+ For Python code, use the language "python" for the code block.
+ For shell scripts, use the language "bash", "shell", or "sh" for the code
+ block.
+
+ Args:
+ image (_type_, optional): Docker image to use for code execution.
+ Defaults to "python:3-slim".
+ pod_name (Optional[str], optional): Name of the kubernetes pod
+ which is created. If None, will autogenerate a name. Defaults to None.
+ namespace (Optional[str], optional): Namespace of the kubernetes pod
+ which is created. If None, will use the current namespace of this instance.
+ pod_spec (Optional[client.V1Pod], optional): Specification of the kubernetes pod.
+ A custom pod spec can be provided with this param.
+ If pod_spec is provided, the params above (image, pod_name, namespace) are ignored.
+ container_name (Optional[str], optional): Name of the container where the code block will be
+ executed. If the pod_spec param is provided, container_name must also be provided.
+ timeout (int, optional): The timeout for code execution. Defaults to 60.
+ work_dir (Union[Path, str], optional): The working directory for the code
+ execution. Defaults to Path("/workspace").
+ kube_config_file (Optional[str], optional): kubernetes configuration file path.
+ If None, will use the KUBECONFIG environment variable or the service account token (in-cluster config).
+ stop_container (bool, optional): If true, will automatically stop the
+ container when stop is called, when the context manager exits or when
+ the Python process exits (via atexit). Defaults to True.
+ execution_policies (dict[str, bool], optional): defines the supported execution languages.
+
+ Raises:
+ ValueError: On argument error, or if the container fails to start.
+ """
+ if kube_config_file is None:
+ config.load_config()
+ else:
+ config.load_config(config_file=kube_config_file)
+
+ self._api_client = client.CoreV1Api()
+
+ if timeout < 1:
+ raise ValueError("Timeout must be greater than or equal to 1.")
+ self._timeout = timeout
+
+ if isinstance(work_dir, str):
+ work_dir = Path(work_dir)
+ self._work_dir: Path = work_dir
+
+ if container_name is None:
+ container_name = "autogen-code-exec"
+ self._container_name = container_name
+
+ # Start a container from the image, ready to exec commands later
+ if pod_spec:
+ pod = pod_spec
+ else:
+ if pod_name is None:
+ pod_name = f"autogen-code-exec-{uuid.uuid4()}"
+ if namespace is None:
+ namespace_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
+ if not Path(namespace_path).is_file():
+ raise ValueError("Namespace where the pod will be launched must be provided")
+ with open(namespace_path, "r") as f:
+ namespace = f.read()
+
+ pod = client.V1Pod(
+ metadata=client.V1ObjectMeta(name=pod_name, namespace=namespace),
+ spec=client.V1PodSpec(
+ restart_policy="Never",
+ containers=[
+ client.V1Container(
+ args=["-c", "while true;do sleep 5; done"],
+ command=["/bin/sh"],
+ name=container_name,
+ image=image,
+ )
+ ],
+ ),
+ )
+
+ try:
+ pod_name = pod.metadata.name
+ namespace = pod.metadata.namespace
+ self._pod = self._api_client.create_namespaced_pod(namespace=namespace, body=pod)
+ except ApiException as e:
+ raise ValueError(f"Creating pod failed: {e}")
+
+ self._wait_for_ready()
+
+ def cleanup() -> None:
+ try:
+ self._api_client.delete_namespaced_pod(pod_name, namespace)
+ except ApiException:
+ pass
+ atexit.unregister(cleanup)
+
+ self._cleanup = cleanup
+
+ if stop_container:
+ atexit.register(cleanup)
+
+ self.execution_policies = self.DEFAULT_EXECUTION_POLICY.copy()
+ if execution_policies is not None:
+ self.execution_policies.update(execution_policies)
+
+ def _wait_for_ready(self, stop_time: float = 0.1) -> None:
+ elapsed_time = 0.0
+ name = self._pod.metadata.name
+ namespace = self._pod.metadata.namespace
+ while True:
+ sleep(stop_time)
+ elapsed_time += stop_time
+ if elapsed_time > self._timeout:
+ raise ValueError(
+ f"pod name {name} on namespace {namespace} is not Ready after timeout {self._timeout} seconds"
+ )
+ try:
+ pod_status = self._api_client.read_namespaced_pod_status(name, namespace)
+ if pod_status.status.phase == "Running":
+ break
+ except ApiException as e:
+ raise ValueError(f"reading pod status failed: {e}")
+
+ @property
+ def timeout(self) -> int:
+ """(Experimental) The timeout for code execution."""
+ return self._timeout
+
+ @property
+ def work_dir(self) -> Path:
+ """(Experimental) The working directory for the code execution."""
+ return self._work_dir
+
+ @property
+ def code_extractor(self) -> CodeExtractor:
+ """(Experimental) Export a code extractor that can be used by an agent."""
+ return MarkdownCodeExtractor()
+
+ def execute_code_blocks(self, code_blocks: List[CodeBlock]) -> CommandLineCodeResult:
+ """(Experimental) Execute the code blocks and return the result.
+
+ Args:
+ code_blocks (List[CodeBlock]): The code blocks to execute.
+
+ Returns:
+ CommandLineCodeResult: The result of the code execution."""
+
+ if len(code_blocks) == 0:
+ raise ValueError("No code blocks to execute.")
+
+ outputs = []
+ files = []
+ last_exit_code = 0
+ for code_block in code_blocks:
+ lang = self.LANGUAGE_ALIASES.get(code_block.language.lower(), code_block.language.lower())
+ if lang not in self.DEFAULT_EXECUTION_POLICY:
+ outputs.append(f"Unsupported language {lang}\n")
+ last_exit_code = 1
+ break
+
+ execute_code = self.execution_policies.get(lang, False)
+ code = silence_pip(code_block.code, lang)
+ if lang in ["bash", "shell", "sh"]:
+ code = "\n".join(["#!/bin/bash", code])
+
+ try:
+ filename = _get_file_name_from_content(code, self._work_dir)
+ except ValueError:
+ outputs.append("Filename is not in the workspace")
+ last_exit_code = 1
+ break
+
+ if not filename:
+ extension = self.LANGUAGE_FILE_EXTENSION.get(lang, lang)
+ filename = f"tmp_code_{md5(code.encode()).hexdigest()}.{extension}"
+
+ code_path = self._work_dir / filename
+
+ exec_script = textwrap.dedent(
+ """
+ if [ ! -d "{workspace}" ]; then
+ mkdir {workspace}
+ fi
+ cat <<EOM >{code_path}\n
+ {code}
+ EOM
+ chmod +x {code_path}"""
+ )
+ exec_script = exec_script.format(workspace=str(self._work_dir), code_path=code_path, code=code)
+ stream(
+ self._api_client.connect_get_namespaced_pod_exec,
+ self._pod.metadata.name,
+ self._pod.metadata.namespace,
+ command=["/bin/sh", "-c", exec_script],
+ container=self._container_name,
+ stderr=True,
+ stdin=False,
+ stdout=True,
+ tty=False,
+ )
+
+ files.append(code_path)
+
+ if not execute_code:
+ outputs.append(f"Code saved to {str(code_path)}\n")
+ continue
+
+ resp = stream(
+ self._api_client.connect_get_namespaced_pod_exec,
+ self._pod.metadata.name,
+ self._pod.metadata.namespace,
+ command=["timeout", str(self._timeout), _cmd(lang), str(code_path)],
+ container=self._container_name,
+ stderr=True,
+ stdin=False,
+ stdout=True,
+ tty=False,
+ _preload_content=False,
+ )
+
+ stdout_messages = []
+ stderr_messages = []
+ while resp.is_open():
+ resp.update(timeout=1)
+ if resp.peek_stderr():
+ stderr_messages.append(resp.read_stderr())
+ if resp.peek_stdout():
+ stdout_messages.append(resp.read_stdout())
+ outputs.extend(stdout_messages + stderr_messages)
+ exit_code = resp.returncode
+ resp.close()
+
+ if exit_code == 124:
+ outputs.append("\n" + TIMEOUT_MSG)
+
+ last_exit_code = exit_code
+ if exit_code != 0:
+ break
+
+ code_file = str(files[0]) if files else None
+ return CommandLineCodeResult(exit_code=last_exit_code, output="".join(outputs), code_file=code_file)
+
+ def stop(self) -> None:
+ """(Experimental) Stop the code executor."""
+ self._cleanup()
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
+ ) -> None:
+ self.stop()
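A minimal usage sketch for the new executor (assuming a reachable cluster, a valid kubeconfig, and permission to create pods in the target namespace; this snippet is illustrative, not part of the diff):

```python
from autogen.coding import CodeBlock
from autogen.coding.kubernetes import PodCommandLineCodeExecutor

# The context manager deletes the pod on exit (stop_container=True by default).
with PodCommandLineCodeExecutor(
    image="python:3-slim",
    namespace="default",  # assumed namespace
    timeout=60,
) as executor:
    result = executor.execute_code_blocks(
        code_blocks=[CodeBlock(language="python", code="print('Hello from the pod')")]
    )
    print(result.exit_code, result.output)
```

Each block is written into `work_dir` inside the pod via the heredoc script above, then run as `timeout <seconds> <interpreter> <file>`; an exit code of 124 maps to the timeout message.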
diff --git a/notebook/agent_memory_using_zep.ipynb b/notebook/agent_memory_using_zep.ipynb
new file mode 100644
index 00000000000..3212f63414f
--- /dev/null
+++ b/notebook/agent_memory_using_zep.ipynb
@@ -0,0 +1,532 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Building an Agent with Long-term Memory using Autogen and Zep\n",
+ "\n",
+ "This notebook walks through how to build an Autogen Agent with long-term memory. Zep builds a knowledge graph from user interactions with the agent, enabling the agent to recall relevant facts from previous conversations or user interactions.\n",
+ "\n",
+ "In this notebook we will:\n",
+ "- Create an Autogen Agent class that extends `ConversableAgent` by adding long-term memory\n",
+ "- Create a Mental Health Assistant Agent, CareBot, that acts as a counselor and coach.\n",
+ "- Create a user Agent, Cathy, who stands in for our expected user.\n",
+ "- Demonstrate preloading chat history into Zep.\n",
+ "- Demonstrate the agents in conversation, with CareBot recalling facts from previous conversations with Cathy.\n",
+ "- Inspect Facts within Zep, and demonstrate how to use Zep's Fact Ratings to improve the quality of returned facts.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Requirements\n",
+ "\n",
+ "````{=mdx}\n",
+ ":::info Requirements\n",
+ "Some extra dependencies are needed for this notebook, which can be installed via pip:\n",
+ "\n",
+ "```bash\n",
+ "pip install autogen~=0.3 zep-cloud python-dotenv\n",
+ "```\n",
+ "\n",
+ "For more information, please refer to the [installation guide](/docs/installation/).\n",
+ ":::\n",
+ "````"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "flaml.automl is not available. Please install flaml[automl] to enable AutoML functionalities.\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import uuid\n",
+ "from typing import Dict, Union\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from autogen import Agent, ConversableAgent\n",
+ "\n",
+ "load_dotenv()\n",
+ "\n",
+ "config_list = [\n",
+ " {\n",
+ " \"model\": \"gpt-4o-mini\",\n",
+ " \"api_key\": os.environ.get(\"OPENAI_API_KEY\"),\n",
+ " \"max_tokens\": 1024,\n",
+ " }\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## initiualize the Zep Client\n",
+ "\n",
+ "You can sign up for a Zep account here: https://www.getzep.com/"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from zep_cloud import FactRatingExamples, FactRatingInstruction, Message\n",
+ "from zep_cloud.client import AsyncZep\n",
+ "\n",
+ "MIN_FACT_RATING = 0.3\n",
+ "\n",
+ "# Configure Zep\n",
+ "zep = AsyncZep(api_key=os.environ.get(\"ZEP_API_KEY\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def convert_to_zep_messages(chat_history: list[dict[str, str | None]]) -> list[Message]:\n",
+ " \"\"\"\n",
+ " Convert chat history to Zep messages.\n",
+ "\n",
+ " Args:\n",
+ " chat_history (list): List of dictionaries containing chat messages.\n",
+ "\n",
+ " Returns:\n",
+ " list: List of Zep Message objects.\n",
+ " \"\"\"\n",
+ " return [\n",
+ " Message(\n",
+ " role_type=msg[\"role\"],\n",
+ " role=msg.get(\"name\", None),\n",
+ " content=msg[\"content\"],\n",
+ " )\n",
+ " for msg in chat_history\n",
+ " ]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## ZepConversableAgent\n",
+ "\n",
+ "The `ZepConversableAgent` is a custom implementation of the `ConversableAgent` that integrates with Zep for long-term memory management. This class extends the functionality of the base `ConversableAgent` by adding Zep-specific features for persisting and retrieving facts from long-term memory."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ZepConversableAgent(ConversableAgent):\n",
+ " \"\"\"\n",
+ " A custom ConversableAgent that integrates with Zep for long-term memory.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(\n",
+ " self,\n",
+ " name: str,\n",
+ " system_message: str,\n",
+ " llm_config: dict,\n",
+ " function_map: dict,\n",
+ " human_input_mode: str,\n",
+ " zep_session_id: str,\n",
+ " ):\n",
+ " super().__init__(\n",
+ " name=name,\n",
+ " system_message=system_message,\n",
+ " llm_config=llm_config,\n",
+ " function_map=function_map,\n",
+ " human_input_mode=human_input_mode,\n",
+ " )\n",
+ " self.zep_session_id = zep_session_id\n",
+ " # store the original system message as we will update it with relevant facts from Zep\n",
+ " self.original_system_message = system_message\n",
+ " self.register_hook(\"a_process_last_received_message\", self.persist_user_messages)\n",
+ " self.register_hook(\"a_process_message_before_send\", self.persist_assistant_messages)\n",
+ "\n",
+ " async def persist_assistant_messages(\n",
+ " self, sender: Agent, message: Union[Dict, str], recipient: Agent, silent: bool\n",
+ " ):\n",
+ " \"\"\"Agent sends a message to the user. Add the message to Zep.\"\"\"\n",
+ "\n",
+ " # Assume message is a string\n",
+ " zep_messages = convert_to_zep_messages([{\"role\": \"assistant\", \"name\": self.name, \"content\": message}])\n",
+ " await zep.memory.add(session_id=self.zep_session_id, messages=zep_messages)\n",
+ "\n",
+ " return message\n",
+ "\n",
+ " async def persist_user_messages(self, messages: list[dict[str, str]] | str):\n",
+ " \"\"\"\n",
+ " User sends a message to the agent. Add the message to Zep and\n",
+ " update the system message with relevant facts from Zep.\n",
+ " \"\"\"\n",
+ " # Assume messages is a string\n",
+ " zep_messages = convert_to_zep_messages([{\"role\": \"user\", \"content\": messages}])\n",
+ " await zep.memory.add(session_id=self.zep_session_id, messages=zep_messages)\n",
+ "\n",
+ " memory = await zep.memory.get(self.zep_session_id, min_rating=MIN_FACT_RATING)\n",
+ "\n",
+ " # Update the system message with the relevant facts retrieved from Zep\n",
+ " self.update_system_message(\n",
+ " self.original_system_message\n",
+ " + f\"\\n\\nRelevant facts about the user and their prior conversation:\\n{memory.relevant_facts}\"\n",
+ " )\n",
+ "\n",
+ " return messages"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Zep User and Session Management\n",
+ "\n",
+ "### Zep User\n",
+ "A Zep User represents an individual interacting with your application. Each User can have multiple Sessions associated with them, allowing you to track and manage interactions over time. The unique identifier for each user is their `UserID`, which can be any string value (e.g., username, email address, or UUID).\n",
+ "\n",
+ "### Zep Session\n",
+ "A Session represents a conversation and can be associated with Users in a one-to-many relationship. Chat messages are added to Sessions, with each session having many messages.\n",
+ "\n",
+ "### Fact Rating\n",
+ " \n",
+ "Fact Rating is a feature in Zep that allows you to rate the importance or relevance of facts extracted from conversations. This helps in prioritizing and filtering information when retrieving memory artifacts. Here, we rate facts based on poignancy. We provide a definition of poignancy and several examples of highly poignant and low-poignancy facts. When retrieving memory, you can use the `min_rating` parameter to filter facts based on their importance.\n",
+ " \n",
+ "Fact Rating helps ensure the most relevant information, especially in long or complex conversations, is used to ground the agent.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Session(classifications=None, created_at='2024-10-07T21:12:13.952672Z', deleted_at=None, ended_at=None, fact_rating_instruction=FactRatingInstruction(examples=FactRatingExamples(high=\"The user received news of a family member's serious illness.\", low='The user bought a new brand of toothpaste.', medium='The user completed a challenging marathon.'), instruction='Rate the facts by poignancy. Highly poignant \\nfacts have a significant emotional impact or relevance to the user. \\nLow poignant facts are minimally relevant or of little emotional \\nsignificance.'), fact_version_uuid=None, facts=None, id=774, metadata=None, project_uuid='00000000-0000-0000-0000-000000000000', session_id='f3854ad0-5bd4-4814-a814-ec0880817953', updated_at='2024-10-07T21:12:13.952672Z', user_id='Cathy1023', uuid_='31ab3314-5ac8-4361-ad11-848fb7befedf')"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "bot_name = \"CareBot\"\n",
+ "user_name = \"Cathy\"\n",
+ "\n",
+ "user_id = user_name + str(uuid.uuid4())[:4]\n",
+ "session_id = str(uuid.uuid4())\n",
+ "\n",
+ "await zep.user.add(user_id=user_id)\n",
+ "\n",
+ "fact_rating_instruction = \"\"\"Rate the facts by poignancy. Highly poignant\n",
+ " facts have a significant emotional impact or relevance to the user.\n",
+ " Low poignant facts are minimally relevant or of little emotional significance.\n",
+ "\"\"\"\n",
+ "\n",
+ "fact_rating_examples = FactRatingExamples(\n",
+ " high=\"The user received news of a family member's serious illness.\",\n",
+ " medium=\"The user completed a challenging marathon.\",\n",
+ " low=\"The user bought a new brand of toothpaste.\",\n",
+ ")\n",
+ "\n",
+ "await zep.memory.add_session(\n",
+ " user_id=user_id,\n",
+ " session_id=session_id,\n",
+ " fact_rating_instruction=FactRatingInstruction(\n",
+ " instruction=fact_rating_instruction,\n",
+ " examples=fact_rating_examples,\n",
+ " ),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Preload a prior conversation into Zep\n",
+ "\n",
+ "We'll load a prior conversation into long-term memory. We'll use facts derived from this conversation when Cathy restarts the conversation with CareBot, ensuring Carebot has context."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "SuccessResponse(message='OK')"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chat_history = [\n",
+ " {\n",
+ " \"role\": \"assistant\",\n",
+ " \"name\": \"carebot\",\n",
+ " \"content\": \"Hi Cathy, how are you doing today?\",\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"name\": \"Cathy\",\n",
+ " \"content\": \"To be honest, I've been feeling a bit down and demotivated lately. It's been tough.\",\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"assistant\",\n",
+ " \"name\": \"CareBot\",\n",
+ " \"content\": \"I'm sorry to hear that you're feeling down and demotivated, Cathy. It's understandable given the challenges you're facing. Can you tell me more about what's been going on?\",\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"name\": \"Cathy\",\n",
+ " \"content\": \"Well, I'm really struggling to process the passing of my mother.\",\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"assistant\",\n",
+ " \"name\": \"CareBot\",\n",
+ " \"content\": \"I'm deeply sorry for your loss, Cathy. Losing a parent is incredibly difficult. It's normal to struggle with grief, and there's no 'right' way to process it. Would you like to talk about your mother or how you're coping?\",\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"name\": \"Cathy\",\n",
+ " \"content\": \"Yes, I'd like to talk about my mother. She was a kind and loving person.\",\n",
+ " },\n",
+ "]\n",
+ "\n",
+ "# Convert chat history to Zep messages\n",
+ "zep_messages = convert_to_zep_messages(chat_history)\n",
+ "\n",
+ "await zep.memory.add(session_id=session_id, messages=zep_messages)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Review all facts in Zep\n",
+ "\n",
+ "We query all session facts for this user session. Only facts that meet the `MIN_FACT_RATING` threshold are returned."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "created_at='2024-10-07T21:12:15.96584Z' fact='Cathy describes her mother as a kind and loving person.' rating=0.5 uuid_='6a086a73-d4b8-4c1b-9b2f-08d5d326d813'\n",
+ "created_at='2024-10-07T21:12:15.96584Z' fact='Cathy has been feeling down and demotivated lately.' rating=0.5 uuid_='e19d959c-2a01-4cc7-9d49-108719f1a749'\n",
+ "created_at='2024-10-07T21:12:15.96584Z' fact='Cathy is struggling to process the passing of her mother.' rating=0.75 uuid_='d6c12a5d-d2a0-486e-b25d-3d4bdc5ff466'\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = await zep.memory.get_session_facts(session_id=session_id, min_rating=MIN_FACT_RATING)\n",
+ "\n",
+ "for r in response.facts:\n",
+ " print(r)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create the Autogen agent, CareBot, an instance of `ZepConversableAgent`\n",
+ "\n",
+ "We pass in the current `session_id` into the CareBot agent which allows it to retrieve relevant facts related to the conversation with Cathy."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "carebot_system_message = \"\"\"\n",
+ "You are a compassionate mental health bot and caregiver. Review information about the user and their prior conversation below and respond accordingly.\n",
+ "Keep responses empathetic and supportive. And remember, always prioritize the user's well-being and mental health. Keep your responses very concise and to the point.\n",
+ "\"\"\"\n",
+ "\n",
+ "agent = ZepConversableAgent(\n",
+ " bot_name,\n",
+ " system_message=carebot_system_message,\n",
+ " llm_config={\"config_list\": config_list},\n",
+ " function_map=None, # No registered functions, by default it is None.\n",
+ " human_input_mode=\"NEVER\", # Never ask for human input.\n",
+ " zep_session_id=session_id,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create the Autogen agent, Cathy\n",
+ "\n",
+ "Cathy is a stand-in for a human. When building a production application, you'd replace Cathy with a human-in-the-loop pattern.\n",
+ "\n",
+ "**Note** that we're instructing Cathy to start the conversation with CareBit by asking about her previous session. This is an opportunity for us to test whether fact retrieval from Zep's long-term memory is working. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cathy = ConversableAgent(\n",
+ " user_name,\n",
+ " system_message=\"You are returning to your conversation with CareBot, a mental health bot. Ask the bot about your previous session.\",\n",
+ " llm_config={\"config_list\": config_list},\n",
+ " human_input_mode=\"NEVER\", # Never ask for human input.\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Start the conversation\n",
+ "\n",
+ "We use Autogen's `a_initiate_chat` method to get the two agents conversing. CareBot is the primary agent.\n",
+ "\n",
+ "**NOTE** how Carebot is able to recall the past conversation about Cathy's mother in detail, having had relevant facts from Zep added to its system prompt."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "result = await agent.a_initiate_chat(\n",
+ " cathy,\n",
+ " message=\"Hi Cathy, nice to see you again. How are you doing today?\",\n",
+ " max_turns=3,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Review current facts in Zep\n",
+ "\n",
+ "Let's see how the facts have evolved as the conversation has progressed."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "created_at='2024-10-07T20:04:28.397184Z' fact=\"Cathy wants to reflect on a previous conversation about her mother and explore the topic of her mother's passing further.\" rating=0.75 uuid_='56488eeb-d8ac-4b2f-8acc-75f71b56ad76'\n",
+ "created_at='2024-10-07T20:04:28.397184Z' fact='Cathy is struggling to process the passing of her mother and has been feeling down and demotivated lately.' rating=0.75 uuid_='0fea3f05-ed1a-4e39-a092-c91f8af9e501'\n",
+ "created_at='2024-10-07T20:04:28.397184Z' fact='Cathy describes her mother as a kind and loving person.' rating=0.5 uuid_='131de203-2984-4cba-9aef-e500611f06d9'\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = await zep.memory.get_session_facts(session_id, min_rating=MIN_FACT_RATING)\n",
+ "\n",
+ "for r in response.facts:\n",
+ " print(r)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Search over Facts in Zep's long-term memory\n",
+ "\n",
+ "In addition to the `memory.get` method which uses the current conversation to retrieve facts, we can also search Zep with our own keywords. Here, we retrieve facts using a query. Again, we use fact ratings to limit the returned facts to only those with a high poignancy rating.\n",
+ "\n",
+ "The `memory.search_sessions` API may be used as an Agent tool, enabling an agent to search across user memory for relevant facts."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "created_at='2024-10-07T20:04:28.397184Z' fact='Cathy describes her mother as a kind and loving person.' rating=0.5 uuid_='131de203-2984-4cba-9aef-e500611f06d9'\n",
+ "created_at='2024-10-07T20:04:28.397184Z' fact='Cathy is struggling to process the passing of her mother and has been feeling down and demotivated lately.' rating=0.75 uuid_='0fea3f05-ed1a-4e39-a092-c91f8af9e501'\n",
+ "created_at='2024-10-07T20:04:28.397184Z' fact=\"Cathy wants to reflect on a previous conversation about her mother and explore the topic of her mother's passing further.\" rating=0.75 uuid_='56488eeb-d8ac-4b2f-8acc-75f71b56ad76'\n"
+ ]
+ }
+ ],
+ "source": [
+ "response = await zep.memory.search_sessions(\n",
+ " text=\"What do you know about Cathy's family?\",\n",
+ " user_id=user_id,\n",
+ " search_scope=\"facts\",\n",
+ " min_fact_rating=MIN_FACT_RATING,\n",
+ ")\n",
+ "\n",
+ "for r in response.results:\n",
+ " print(r.fact)"
+ ]
+ }
+ ],
+ "metadata": {
+ "front_matter": {
+ "tags": [
+ "memory"
+ ],
+ "description": "Agent Memory with Zep."
+ },
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebook/agentchat_RetrieveChat_couchbase.ipynb b/notebook/agentchat_RetrieveChat_couchbase.ipynb
new file mode 100644
index 00000000000..17e9284a379
--- /dev/null
+++ b/notebook/agentchat_RetrieveChat_couchbase.ipynb
@@ -0,0 +1,579 @@
+{
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Using RetrieveChat Powered by Couchbase Capella for Retrieve Augmented Code Generation and Question Answering\n",
+ "\n",
+ "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
+ "\n",
+ "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
+ "\n",
+ "## Table of Contents\n",
+ "We'll demonstrate six examples of using RetrieveChat for code generation and question answering:\n",
+ "\n",
+ "- [Example 1: Generate code based off docstrings w/o human feedback](#example-1)\n",
+ "\n",
+ "````{=mdx}\n",
+ ":::info Requirements\n",
+ "Some extra dependencies are needed for this notebook, which can be installed via pip:\n",
+ "\n",
+ "```bash\n",
+ "pip install pyautogen[retrievechat-couchbase] flaml[automl]\n",
+ "```\n",
+ "\n",
+ "For more information, please refer to the [installation guide](/docs/installation/).\n",
+ ":::\n",
+ "````\n",
+ "\n",
+ "Ensure you have a Couchbase Capella cluster running. Read more on how to get started [here](https://docs.couchbase.com/cloud/get-started/intro.html)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Set your API Endpoint\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "models to use: ['gpt-4o-mini']\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import sys\n",
+ "\n",
+ "from autogen import AssistantAgent\n",
+ "\n",
+ "sys.path.append(os.path.abspath(\"/workspaces/autogen/autogen/agentchat/contrib\"))\n",
+ "\n",
+ "from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n",
+ "\n",
+ "# Accepted file formats for that can be stored in\n",
+ "# a vector database instance\n",
+ "from autogen.retrieve_utils import TEXT_FORMATS\n",
+ "\n",
+ "config_list = [{\"model\": \"gpt-4o-mini\", \"api_key\": os.environ[\"OPENAI_API_KEY\"], \"api_type\": \"openai\"}]\n",
+ "assert len(config_list) > 0\n",
+ "print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "````{=mdx}\n",
+ ":::tip\n",
+ "Learn more about configuring LLMs for agents [here](/docs/topics/llm_configuration).\n",
+ ":::\n",
+ "````\n",
+ "\n",
+ "## Construct agents for RetrieveChat\n",
+ "\n",
+ "We start by initializing the `AssistantAgent` and `RetrieveUserProxyAgent`. The system message needs to be set to \"You are a helpful assistant.\" for AssistantAgent. The detailed instructions are given in the user message. Later we will use the `RetrieveUserProxyAgent.message_generator` to combine the instructions and a retrieval augmented generation task for an initial prompt to be sent to the LLM assistant."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Accepted file formats for `docs_path`:\n",
+ "['txt', 'json', 'csv', 'tsv', 'md', 'html', 'htm', 'rtf', 'rst', 'jsonl', 'log', 'xml', 'yaml', 'yml', 'pdf']\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Accepted file formats for `docs_path`:\")\n",
+ "print(TEXT_FORMATS)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# 1. create an AssistantAgent instance named \"assistant\"\n",
+ "assistant = AssistantAgent(\n",
+ " name=\"assistant\",\n",
+ " system_message=\"You are a helpful assistant.\",\n",
+ " llm_config={\n",
+ " \"timeout\": 600,\n",
+ " \"cache_seed\": 42,\n",
+ " \"config_list\": config_list,\n",
+ " },\n",
+ ")\n",
+ "\n",
+ "# 2. create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n",
+ "# Refer to https://microsoft.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n",
+ "# and https://microsoft.github.io/autogen/docs/reference/agentchat/contrib/vectordb/couchbase\n",
+ "# for more information on the RetrieveUserProxyAgent and CouchbaseVectorDB\n",
+ "ragproxyagent = RetrieveUserProxyAgent(\n",
+ " name=\"ragproxyagent\",\n",
+ " human_input_mode=\"NEVER\",\n",
+ " max_consecutive_auto_reply=3,\n",
+ " retrieve_config={\n",
+ " \"task\": \"code\",\n",
+ " \"docs_path\": [\n",
+ " \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md\",\n",
+ " \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md\",\n",
+ " ],\n",
+ " \"chunk_token_size\": 2000,\n",
+ " \"model\": config_list[0][\"model\"],\n",
+ " \"vector_db\": \"couchbase\", # Couchbase Capella VectorDB\n",
+ " \"collection_name\": \"demo_collection\", # Couchbase Capella collection name to be utilized/created\n",
+ " \"db_config\": {\n",
+ " \"connection_string\": os.environ[\"CB_CONN_STR\"], # Couchbase Capella connection string\n",
+ " \"username\": os.environ[\"CB_USERNAME\"], # Couchbase Capella username\n",
+ " \"password\": os.environ[\"CB_PASSWORD\"], # Couchbase Capella password\n",
+ " \"bucket_name\": \"test_db\", # Couchbase Capella bucket name\n",
+ " \"scope_name\": \"test_scope\", # Couchbase Capella scope name\n",
+ " \"index_name\": \"vector_index\", # Couchbase Capella index name to be created\n",
+ " },\n",
+ " \"get_or_create\": True, # set to False if you don't want to reuse an existing collection\n",
+ " \"overwrite\": False, # set to True if you want to overwrite an existing collection, each overwrite will force a index creation and reupload of documents\n",
+ " },\n",
+ " code_execution_config=False, # set to False if you don't want to execute the code\n",
+ ")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Example 1\n",
+ "\n",
+ "[Back to top](#table-of-contents)\n",
+ "\n",
+ "Use RetrieveChat to help generate sample code and automatically run the code and fix errors if there is any.\n",
+ "\n",
+ "Problem: Which API should I use if I want to use FLAML for a classification task and I want to train the model in 30 seconds. Use spark to parallel the training. Force cancel jobs if time limit is reached.\n",
+ "\n",
+ "Note: You may need to create an index on the cluster to query"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2024-10-16 12:08:07,062 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `demo_collection`.\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Trying to create collection.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2024-10-16 12:08:07,953 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "VectorDB returns doc_ids: [['bdfbc921', '7968cf3c']]\n",
+ "\u001b[32mAdding content of doc bdfbc921 to context.\u001b[0m\n",
+ "\u001b[32mAdding content of doc 7968cf3c to context.\u001b[0m\n",
+ "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
+ "\n",
+ "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n",
+ "context provided by the user.\n",
+ "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n",
+ "For code generation, you must obey the following rules:\n",
+ "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n",
+ "Rule 2. You must follow the formats below to write your code:\n",
+ "```language\n",
+ "# your code\n",
+ "```\n",
+ "\n",
+ "User's question is: How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\n",
+ "\n",
+ "Context is: # Integrate - Spark\n",
+ "\n",
+ "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n",
+ "\n",
+ "- Use Spark ML estimators for AutoML.\n",
+ "- Use Spark to run training in parallel spark jobs.\n",
+ "\n",
+ "## Spark ML Estimators\n",
+ "\n",
+ "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n",
+ "\n",
+ "### Data\n",
+ "\n",
+ "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n",
+ "\n",
+ "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n",
+ "\n",
+ "This function also accepts optional arguments `index_col` and `default_index_type`.\n",
+ "\n",
+ "- `index_col` is the column name to use as the index, default is None.\n",
+ "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n",
+ "\n",
+ "Here is an example code snippet for Spark Data:\n",
+ "\n",
+ "```python\n",
+ "import pandas as pd\n",
+ "from flaml.automl.spark.utils import to_pandas_on_spark\n",
+ "\n",
+ "# Creating a dictionary\n",
+ "data = {\n",
+ " \"Square_Feet\": [800, 1200, 1800, 1500, 850],\n",
+ " \"Age_Years\": [20, 15, 10, 7, 25],\n",
+ " \"Price\": [100000, 200000, 300000, 240000, 120000],\n",
+ "}\n",
+ "\n",
+ "# Creating a pandas DataFrame\n",
+ "dataframe = pd.DataFrame(data)\n",
+ "label = \"Price\"\n",
+ "\n",
+ "# Convert to pandas-on-spark dataframe\n",
+ "psdf = to_pandas_on_spark(dataframe)\n",
+ "```\n",
+ "\n",
+ "To use Spark ML models you need to format your data appropriately. Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n",
+ "\n",
+ "Here is an example of how to use it:\n",
+ "\n",
+ "```python\n",
+ "from pyspark.ml.feature import VectorAssembler\n",
+ "\n",
+ "columns = psdf.columns\n",
+ "feature_cols = [col for col in columns if col != label]\n",
+ "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n",
+ "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n",
+ "```\n",
+ "\n",
+ "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n",
+ "\n",
+ "### Estimators\n",
+ "\n",
+ "#### Model List\n",
+ "\n",
+ "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n",
+ "\n",
+ "#### Usage\n",
+ "\n",
+ "First, prepare your data in the required format as described in the previous section.\n",
+ "\n",
+ "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n",
+ "\n",
+ "Here is an example code snippet using SparkML models in AutoML:\n",
+ "\n",
+ "```python\n",
+ "import flaml\n",
+ "\n",
+ "# prepare your data in pandas-on-spark format as we previously mentioned\n",
+ "\n",
+ "automl = flaml.AutoML()\n",
+ "settings = {\n",
+ " \"time_budget\": 30,\n",
+ " \"metric\": \"r2\",\n",
+ " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n",
+ " \"task\": \"regression\",\n",
+ "}\n",
+ "\n",
+ "automl.fit(\n",
+ " dataframe=psdf,\n",
+ " label=label,\n",
+ " **settings,\n",
+ ")\n",
+ "```\n",
+ "\n",
+ "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n",
+ "\n",
+ "## Parallel Spark Jobs\n",
+ "\n",
+ "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n",
+ "\n",
+ "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n",
+ "\n",
+ "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n",
+ "\n",
+ "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n",
+ "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performes parallel tuning.\n",
+ "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n",
+ "\n",
+ "An example code snippet for using parallel Spark jobs:\n",
+ "\n",
+ "```python\n",
+ "import flaml\n",
+ "\n",
+ "automl_experiment = flaml.AutoML()\n",
+ "automl_settings = {\n",
+ " \"time_budget\": 30,\n",
+ " \"metric\": \"r2\",\n",
+ " \"task\": \"regression\",\n",
+ " \"n_concurrent_trials\": 2,\n",
+ " \"use_spark\": True,\n",
+ " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n",
+ "}\n",
+ "\n",
+ "automl.fit(\n",
+ " dataframe=dataframe,\n",
+ " label=label,\n",
+ " **automl_settings,\n",
+ ")\n",
+ "```\n",
+ "\n",
+ "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n",
+ "# Research\n",
+ "\n",
+ "For technical details, please check our research publications.\n",
+ "\n",
+ "- [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wang2021flaml,\n",
+ " title={FLAML: A Fast and Lightweight AutoML Library},\n",
+ " author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n",
+ " year={2021},\n",
+ " booktitle={MLSys},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wu2021cfo,\n",
+ " title={Frugal Optimization for Cost-related Hyperparameters},\n",
+ " author={Qingyun Wu and Chi Wang and Silu Huang},\n",
+ " year={2021},\n",
+ " booktitle={AAAI},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wang2021blendsearch,\n",
+ " title={Economical Hyperparameter Optimization With Blended Search Strategy},\n",
+ " author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n",
+ " year={2021},\n",
+ " booktitle={ICLR},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. ACL 2021.\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{liuwang2021hpolm,\n",
+ " title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n",
+ " author={Susan Xueqing Liu and Chi Wang},\n",
+ " year={2021},\n",
+ " booktitle={ACL},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wu2021chacha,\n",
+ " title={ChaCha for Online AutoML},\n",
+ " author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n",
+ " year={2021},\n",
+ " booktitle={ICML},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wuwang2021fairautoml,\n",
+ " title={Fair AutoML},\n",
+ " author={Qingyun Wu and Chi Wang},\n",
+ " year={2021},\n",
+ " booktitle={ArXiv preprint arXiv:2111.06495},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{kayaliwang2022default,\n",
+ " title={Mining Robust Default Configurations for Resource-constrained AutoML},\n",
+ " author={Moe Kayali and Chi Wang},\n",
+ " year={2022},\n",
+ " booktitle={ArXiv preprint arXiv:2202.09927},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{zhang2023targeted,\n",
+ " title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n",
+ " author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n",
+ " booktitle={International Conference on Learning Representations},\n",
+ " year={2023},\n",
+ " url={https://openreview.net/forum?id=0Ij9_q567Ma},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wang2023EcoOptiGen,\n",
+ " title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n",
+ " author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n",
+ " year={2023},\n",
+ " booktitle={ArXiv preprint arXiv:2303.04673},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "- [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2306.01337 (2023).\n",
+ "\n",
+ "```bibtex\n",
+ "@inproceedings{wu2023empirical,\n",
+ " title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n",
+ " author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n",
+ " year={2023},\n",
+ " booktitle={ArXiv preprint arXiv:2306.01337},\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
+ "\n",
+ "```python\n",
+ "import pandas as pd\n",
+ "from pyspark.ml.feature import VectorAssembler\n",
+ "import flaml\n",
+ "from flaml.automl.spark.utils import to_pandas_on_spark\n",
+ "\n",
+ "# Creating a dictionary for the example data\n",
+ "data = {\n",
+ " \"Square_Feet\": [800, 1200, 1800, 1500, 850],\n",
+ " \"Age_Years\": [20, 15, 10, 7, 25],\n",
+ " \"Price\": [100000, 200000, 300000, 240000, 120000],\n",
+ "}\n",
+ "\n",
+ "# Creating a pandas DataFrame\n",
+ "dataframe = pd.DataFrame(data)\n",
+ "label = \"Price\"\n",
+ "\n",
+ "# Convert to pandas-on-spark dataframe\n",
+ "psdf = to_pandas_on_spark(dataframe)\n",
+ "\n",
+ "# Prepare features using VectorAssembler\n",
+ "columns = psdf.columns\n",
+ "feature_cols = [col for col in columns if col != label]\n",
+ "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n",
+ "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[[\"index\", \"features\"]]\n",
+ "\n",
+ "# Setting up and running FLAML for AutoML with Spark\n",
+ "automl = flaml.AutoML()\n",
+ "automl_settings = {\n",
+ " \"time_budget\": 30, # Set the time budget to 30 seconds\n",
+ " \"metric\": \"r2\", # Performance metric\n",
+ " \"task\": \"regression\", # Problem type\n",
+ " \"n_concurrent_trials\": 2, # Number of concurrent trials\n",
+ " \"use_spark\": True, # Use Spark for parallel jobs\n",
+ " \"force_cancel\": True, # Force cancel jobs if time limit is reached\n",
+ "}\n",
+ "\n",
+ "automl.fit(\n",
+ " dataframe=psdf,\n",
+ " label=label,\n",
+ " **automl_settings\n",
+ ")\n",
+ "```\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33massistant\u001b[0m (to ragproxyagent):\n",
+ "\n",
+ "UPDATE CONTEXT\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n",
+ "VectorDB returns doc_ids: [['bdfbc921', '7968cf3c']]\n",
+ "\u001b[32mNo more context, will terminate.\u001b[0m\n",
+ "\u001b[33mragproxyagent\u001b[0m (to assistant):\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "# reset the assistant. Always reset the assistant before starting a new conversation.\n",
+ "assistant.reset()\n",
+ "\n",
+ "# given a problem, we use the ragproxyagent to generate a prompt to be sent to the assistant as the initial message.\n",
+ "# the assistant receives the message and generates a response. The response will be sent back to the ragproxyagent for processing.\n",
+ "# The conversation continues until the termination condition is met, in RetrieveChat, the termination condition when no human-in-loop is no code block detected.\n",
+ "# With human-in-loop, the conversation will continue until the user says \"exit\".\n",
+ "code_problem = \"How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\"\n",
+ "chat_result = ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=code_problem)"
+ ]
+ }
+ ],
+ "metadata": {
+ "front_matter": {
+ "description": "Explore the use of AutoGen's RetrieveChat for tasks like code generation from docstrings, answering complex questions with human feedback, and exploiting features like Update Context, custom prompts, and few-shot learning.",
+ "tags": [
+ "RAG"
+ ]
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.7"
+ },
+ "skip_test": "Requires interactive usage"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebook/agentchat_webcrawling_with_spider.ipynb b/notebook/agentchat_webcrawling_with_spider.ipynb
new file mode 100644
index 00000000000..45d270b37e5
--- /dev/null
+++ b/notebook/agentchat_webcrawling_with_spider.ipynb
@@ -0,0 +1,437 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Web Scraping using Spider API\n",
+ "\n",
+ "This notebook shows how to use the open \n",
+ "source [Spider](https://spider.cloud/) web crawler together with AutoGen agents."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First we need to install the Spider SDK and the AutoGen library."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "! pip install -qqq pyautogen spider-client"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Setting up the LLM configuration and the Spider API key is also required."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "config_list = [\n",
+ " {\"model\": \"gpt-4o\", \"api_key\": os.getenv(\"OPENAI_API_KEY\")},\n",
+ "]\n",
+ "\n",
+ "spider_api_key = os.getenv(\"SPIDER_API_KEY\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's define the tool for scraping and crawling data from any website with Spider.\n",
+ "Read more about tool use in this [tutorial chapter](/docs/tutorial/tool-use)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[{'content': 'Spider - The Fastest Web Crawling Service[Spider v1 Logo Spider ](/)[Pricing](/credits/new)[GitHub](https://github.com/spider-rs/spider) [Twitter](https://twitter.com/spider_rust) Toggle ThemeSign InRegisterTo help you get started with Spider, we’ll give you $200 in credits when you spend $100. [Get Credits](/credits/new)LangChain integration [now available](https://python.langchain.com/docs/integrations/document_loaders/spider)The World\\'s Fastest and Cheapest Crawler API==========View Demo* Basic* StreamingExample requestPythonCopy```import requests, osheaders = { \\'Authorization\\': os.environ[\"SPIDER_API_KEY\"], \\'Content-Type\\': \\'application/json\\',}json_data = {\"limit\":50,\"url\":\"http://www.example.com\"}response = requests.post(\\'https://api.spider.cloud/crawl\\', headers=headers, json=json_data)print(response.json())```Example ResponseUnmatched Speed----------### 2.5secs ###To crawl 200 pages### 100-500x ###Faster than alternatives### 500x ###Cheaper than traditional scraping services Benchmarks displaying performance between Spider Cloud, Firecrawl, and Apify.Example used tailwindcss.com - 04/16/2024[See framework benchmarks ](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md)Foundations for Crawling Effectively----------### Leading in performance ###Spider is written in Rust and runs in full concurrency to achieve crawling dozens of pages in secs.### Optimal response format ###Get clean and formatted markdown, HTML, or text content for fine-tuning or training AI models.### Caching ###Further boost speed by caching repeated web page crawls.### Smart Mode ###Spider dynamically switches to Headless Chrome when it needs to.Beta### Scrape with AI ###Do custom browser scripting and data extraction using the latest AI models.### Best crawler for LLMs ###Don\\'t let crawling and scraping be the highest latency in your LLM & AI agent stack.### Scrape with no headaches ###* Proxy rotations* Agent headers* Avoid anti-bot detections* Headless chrome* Markdown LLM Responses### The Fastest Web Crawler ###* Powered by [spider-rs](https://github.com/spider-rs/spider)* Do 20,000 pages in seconds* Full concurrency* Powerful and simple API* 5,000 requests per minute### Do more with AI ###* Custom browser scripting* Advanced data extraction* Data pipelines* Perfect for LLM and AI Agents* Accurate website labelingSee what\\'s being said----------[ ](https://twitter.com/iammerrick/status/1787873425446572462)[Merrick Christensen](https://twitter.com/iammerrick/status/1787873425446572462)[@iammerrick ](https://twitter.com/iammerrick/status/1787873425446572462)· [Follow](https://twitter.com/intent/follow?screen_name=iammerrick)[](https://twitter.com/iammerrick/status/1787873425446572462)Rust based crawler Spider is next level for crawling & scraping sites. So fast. Their cloud offering is also so easy to use. Good stuff. 
[ github.com/spider-rs/spid… ](https://github.com/spider-rs/spider)[ 3:53 PM · May 7, 2024 ](https://twitter.com/iammerrick/status/1787873425446572462) [](https://help.twitter.com/en/twitter-for-websites-ads-info-and-privacy)[12 ](https://twitter.com/intent/like?tweet_id=1787873425446572462) [Reply ](https://twitter.com/intent/tweet?in_reply_to=1787873425446572462)[ Read more on Twitter ](https://twitter.com/iammerrick/status/1787873425446572462)[ ](https://twitter.com/WilliamEspegren/status/1789419820821184764)[William Espegren](https://twitter.com/WilliamEspegren/status/1789419820821184764)[@WilliamEspegren ](https://twitter.com/WilliamEspegren/status/1789419820821184764)· [Follow](https://twitter.com/intent/follow?screen_name=WilliamEspegren)[](https://twitter.com/WilliamEspegren/status/1789419820821184764)Web crawler built in rust, currently the nr1 performance in the world with crazy resource management Aaaaaaand they have a cloud offer, that’s wayyyy cheaper than any competitor Name a reason for me to use anything else? [ github.com/spider-rs/spid… ](https://github.com/spider-rs/spider)[ 10:18 PM · May 11, 2024 ](https://twitter.com/WilliamEspegren/status/1789419820821184764) [](https://help.twitter.com/en/twitter-for-websites-ads-info-and-privacy)[2 ](https://twitter.com/intent/like?tweet_id=1789419820821184764) [Reply ](https://twitter.com/intent/tweet?in_reply_to=1789419820821184764)[ Read 1 reply ](https://twitter.com/WilliamEspegren/status/1789419820821184764)[ ](https://twitter.com/Troyusrex/status/1791497607925088307)[Troy Lowry](https://twitter.com/Troyusrex/status/1791497607925088307)[@Troyusrex ](https://twitter.com/Troyusrex/status/1791497607925088307)· [Follow](https://twitter.com/intent/follow?screen_name=Troyusrex)[](https://twitter.com/Troyusrex/status/1791497607925088307)[ @spider\\\\_rust ](https://twitter.com/spider_rust) First, the good: Spider has enabled me to speed up my scraping 20X and with a bit higher quality than I was getting before. I am having a few issues however. First, the documentation link doesn\\'t work ([ spider.cloud/guides/(/docs/… ](https://spider.cloud/guides/(/docs/api)))I\\'ve figured out how to get it to work…[ 3:54 PM · May 17, 2024 ](https://twitter.com/Troyusrex/status/1791497607925088307) [](https://help.twitter.com/en/twitter-for-websites-ads-info-and-privacy)[1 ](https://twitter.com/intent/like?tweet_id=1791497607925088307) [Reply ](https://twitter.com/intent/tweet?in_reply_to=1791497607925088307)[ Read 2 replies ](https://twitter.com/Troyusrex/status/1791497607925088307)FAQ----------Frequently asked questions about SpiderWhat is Spider?---------- Spider is a leading web crawling tool designed for speed and cost-effectiveness, supporting various data formats including LLM-ready markdown.Why is my website not crawling?---------- Your crawl may fail if it requires JavaScript rendering. Try setting your request to \\'chrome\\' to solve this issue.Can you crawl all pages?---------- Yes, Spider accurately crawls all necessary content without needing a sitemap.What formats can Spider convert web data into?---------- Spider outputs HTML, raw, text, and various markdown formats. 
It supports JSON, JSONL, CSV, and XML for API responses.Is Spider suitable for large scraping projects?---------- Absolutely, Spider is ideal for large-scale data collection and offers a cost-effective dashboard for data management.How can I try Spider?---------- Purchase credits for our cloud system or test the Open Source Spider engine to explore its capabilities.Does it respect robots.txt?---------- Yes, compliance with robots.txt is default, but you can disable this if necessary. [API](/docs/api) [Pricing](/credits/new) [Guides](/guides) [About](/about) [Docs](https://docs.rs/spider/latest/spider/) [Privacy](/privacy) [Terms](/eula) [FAQ](/faq)© 2024 Spider from A11yWatch[GitHubGithub](https://github.com/spider-rs/spider) [X - Twitter ](https://twitter.com/spider_rust)', 'error': None, 'status': 200, 'url': 'https://spider.cloud'}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from typing import Any, Dict, List\n",
+ "\n",
+ "from spider import Spider\n",
+ "from typing_extensions import Annotated\n",
+ "\n",
+ "\n",
+ "def scrape_page(\n",
+ " url: Annotated[str, \"The URL of the web page to scrape\"],\n",
+ " params: Annotated[dict, \"Dictionary of additional params.\"] = None,\n",
+ ") -> Annotated[Dict[str, Any], \"Scraped content\"]:\n",
+ " # Initialize the Spider client with your API key, if no api key is specified it looks for SPIDER_API_KEY in your environment variables\n",
+ " client = Spider(spider_api_key)\n",
+ "\n",
+ " if params is None:\n",
+ " params = {\"return_format\": \"markdown\"}\n",
+ "\n",
+ " scraped_data = client.scrape_url(url, params)\n",
+ " return scraped_data[0]\n",
+ "\n",
+ "\n",
+ "def crawl_page(\n",
+ " url: Annotated[str, \"The url of the domain to be crawled\"],\n",
+ " params: Annotated[dict, \"Dictionary of additional params.\"] = None,\n",
+ ") -> Annotated[List[Dict[str, Any]], \"Scraped content\"]:\n",
+ " # Initialize the Spider client with your API key, if no api key is specified it looks for SPIDER_API_KEY in your environment variables\n",
+ " client = Spider(spider_api_key)\n",
+ "\n",
+ " if params is None:\n",
+ " params = {\"return_format\": \"markdown\"}\n",
+ "\n",
+ " crawled_data = client.crawl_url(url, params)\n",
+ " return crawled_data"
+ ]
+ },
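+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before wiring these tools into agents, you can call them directly to sanity-check the Spider setup. A minimal sketch, assuming `SPIDER_API_KEY` is set and the target site is reachable:\n",
+    "\n",
+    "```python\n",
+    "result = scrape_page(\"https://spider.cloud\")\n",
+    "print(result[\"status\"], result[\"url\"])  # the returned dict also contains \"content\" and \"error\"\n",
+    "```"
+   ]
+  },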
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Create the agents and register the tool."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from autogen import ConversableAgent, register_function\n",
+ "\n",
+ "# Create web scraper agent.\n",
+ "scraper_agent = ConversableAgent(\n",
+ " \"WebScraper\",\n",
+ " llm_config={\"config_list\": config_list},\n",
+ " system_message=\"You are a web scraper and you can scrape any web page to retrieve its contents.\"\n",
+ " \"Returns 'TERMINATE' when the scraping is done.\",\n",
+ ")\n",
+ "\n",
+ "# Create web crawler agent.\n",
+ "crawler_agent = ConversableAgent(\n",
+ " \"WebCrawler\",\n",
+ " llm_config={\"config_list\": config_list},\n",
+ " system_message=\"You are a web crawler and you can crawl any page with deeper crawling following subpages.\"\n",
+ " \"Returns 'TERMINATE' when the scraping is done.\",\n",
+ ")\n",
+ "\n",
+ "# Create user proxy agent.\n",
+ "user_proxy_agent = ConversableAgent(\n",
+ " \"UserProxy\",\n",
+ " llm_config=False, # No LLM for this agent.\n",
+ " human_input_mode=\"NEVER\",\n",
+ " code_execution_config=False, # No code execution for this agent.\n",
+ " is_termination_msg=lambda x: x.get(\"content\", \"\") is not None and \"terminate\" in x[\"content\"].lower(),\n",
+ " default_auto_reply=\"Please continue if not finished, otherwise return 'TERMINATE'.\",\n",
+ ")\n",
+ "\n",
+ "# Register the functions with the agents.\n",
+ "register_function(\n",
+ " scrape_page,\n",
+ " caller=scraper_agent,\n",
+ " executor=user_proxy_agent,\n",
+ " name=\"scrape_page\",\n",
+ " description=\"Scrape a web page and return the content.\",\n",
+ ")\n",
+ "\n",
+ "register_function(\n",
+ " crawl_page,\n",
+ " caller=crawler_agent,\n",
+ " executor=user_proxy_agent,\n",
+ " name=\"crawl_page\",\n",
+ " description=\"Crawl an entire domain, following subpages and return the content.\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Start the conversation for scraping web data. We used the\n",
+ "`reflection_with_llm` option for summary method\n",
+ "to perform the formatting of the output into a desired format.\n",
+ "The summary method is called after the conversation is completed\n",
+ "given the complete history of the conversation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mUserProxy\u001b[0m (to WebScraper):\n",
+ "\n",
+ "Can you scrape william-espegren.com for me?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebScraper\u001b[0m (to UserProxy):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_qCNYeQCfIPZkUCKejQmm5EhC): scrape_page *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"url\":\"https://www.william-espegren.com\"}\n",
+ "\u001b[32m****************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION scrape_page...\u001b[0m\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebScraper):\n",
+ "\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebScraper):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_qCNYeQCfIPZkUCKejQmm5EhC) *****\u001b[0m\n",
+ "[{\"content\": \"William Espegren - Portfoliokeep scrollingMADE WITHCSS, JSMADE BYUppsalaWilliam EspegrenWith \\u00b7LoveOpen For Projects[CONTACT ME](https://www.linkedin.com/in/william-espegren/)[Instagram](https://www.instagram.com/williamespegren/)[LinkedIn](https://www.linkedin.com/in/william-espegren/)[Twitter](https://twitter.com/WilliamEspegren)[team-collaboration/version-control/github Created with Sketch.Github](https://github.com/WilliamEspegren)\", \"error\": null, \"status\": 200, \"url\": \"https://www.william-espegren.com\"}]\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebScraper\u001b[0m (to UserProxy):\n",
+ "\n",
+ "I successfully scraped the website \"william-espegren.com\". Here is the content retrieved:\n",
+ "\n",
+ "```\n",
+ "William Espegren - Portfolio\n",
+ "\n",
+ "keep scrolling\n",
+ "\n",
+ "MADE WITH\n",
+ "CSS, JS\n",
+ "\n",
+ "MADE BY\n",
+ "Uppsala\n",
+ "\n",
+ "William Espegren\n",
+ "With Love\n",
+ "\n",
+ "Open For Projects\n",
+ "\n",
+ "[CONTACT ME](https://www.linkedin.com/in/william-espegren/)\n",
+ "[Instagram](https://www.instagram.com/williamespegren/)\n",
+ "[LinkedIn](https://www.linkedin.com/in/william-espegren/)\n",
+ "[Twitter](https://twitter.com/WilliamEspegren)\n",
+ "[Github](https://github.com/WilliamEspegren)\n",
+ "```\n",
+ "\n",
+ "Is there anything specific you would like to do with this information?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebScraper):\n",
+ "\n",
+ "Please continue if not finished, otherwise return 'TERMINATE'.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebScraper\u001b[0m (to UserProxy):\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Scrape page\n",
+ "scraped_chat_result = user_proxy_agent.initiate_chat(\n",
+ " scraper_agent,\n",
+ " message=\"Can you scrape william-espegren.com for me?\",\n",
+ " summary_method=\"reflection_with_llm\",\n",
+ " summary_args={\"summary_prompt\": \"\"\"Summarize the scraped content\"\"\"},\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mUserProxy\u001b[0m (to WebCrawler):\n",
+ "\n",
+ "Can you crawl william-espegren.com for me, I want the whole domains information?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebCrawler\u001b[0m (to UserProxy):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_0FkTtsxBtA0SbChm1PX085Vk): crawl_page *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"url\":\"http://www.william-espegren.com\"}\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION crawl_page...\u001b[0m\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebCrawler):\n",
+ "\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebCrawler):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_0FkTtsxBtA0SbChm1PX085Vk) *****\u001b[0m\n",
+ "[{\"content\": \"William Espegren - Portfoliokeep scrollingMADE WITHCSS, JSMADE BYUppsalaWilliam EspegrenWith \\u00b7LoveOpen For Projects[CONTACT ME](https://www.linkedin.com/in/william-espegren/)[Instagram](https://www.instagram.com/williamespegren/)[LinkedIn](https://www.linkedin.com/in/william-espegren/)[Twitter](https://twitter.com/WilliamEspegren)[team-collaboration/version-control/github Created with Sketch.Github](https://github.com/WilliamEspegren)\", \"error\": null, \"status\": 200, \"url\": \"http://www.william-espegren.com\"}]\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebCrawler\u001b[0m (to UserProxy):\n",
+ "\n",
+ "The crawl of [william-espegren.com](http://www.william-espegren.com) has been completed. Here is the gathered content:\n",
+ "\n",
+ "---\n",
+ "\n",
+ "**William Espegren - Portfolio**\n",
+ "\n",
+ "Keep scrolling\n",
+ "\n",
+ "**MADE WITH:** CSS, JS\n",
+ "\n",
+ "**MADE BY:** Uppsala\n",
+ "\n",
+ "**William Espegren**\n",
+ "\n",
+ "**With Love**\n",
+ "\n",
+ "**Open For Projects**\n",
+ "\n",
+ "**[CONTACT ME](https://www.linkedin.com/in/william-espegren/)**\n",
+ "\n",
+ "- [Instagram](https://www.instagram.com/williamespegren/)\n",
+ "- [LinkedIn](https://www.linkedin.com/in/william-espegren/)\n",
+ "- [Twitter](https://twitter.com/WilliamEspegren)\n",
+ "- [Github](https://github.com/WilliamEspegren)\n",
+ "\n",
+ "---\n",
+ "\n",
+ "If you need further information or details from any specific section, please let me know!\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mUserProxy\u001b[0m (to WebCrawler):\n",
+ "\n",
+ "Please continue if not finished, otherwise return 'TERMINATE'.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mWebCrawler\u001b[0m (to UserProxy):\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Crawl page\n",
+ "crawled_chat_result = user_proxy_agent.initiate_chat(\n",
+ " crawler_agent,\n",
+ " message=\"Can you crawl william-espegren.com for me, I want the whole domains information?\",\n",
+ " summary_method=\"reflection_with_llm\",\n",
+ " summary_args={\"summary_prompt\": \"\"\"Summarize the crawled content\"\"\"},\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The output is stored in the summary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The website belongs to William Espegren, who is based in Uppsala and possesses skills in CSS and JavaScript. He is open to new projects. You can contact him through the following links:\n",
+ "\n",
+ "- [LinkedIn](https://www.linkedin.com/in/william-espegren/)\n",
+ "- [Instagram](https://www.instagram.com/williamespegren/)\n",
+ "- [Twitter](https://twitter.com/WilliamEspegren)\n",
+ "- [GitHub](https://github.com/WilliamEspegren)\n",
+ "\n",
+ "Feel free to reach out to him for project collaborations.\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(scraped_chat_result.summary)\n",
+ "# print(crawled_chat_result.summary) # We show one for cleaner output"
+ ]
+ }
+ ],
+ "metadata": {
+ "front_matter": {
+ "description": "Scraping/Crawling web pages and summarizing the content using agents.",
+ "tags": [
+ "web scraping",
+ "spider",
+ "tool use"
+ ],
+ "title": "Web Scraper & Crawler Agent using Spider"
+ },
+ "kernelspec": {
+ "display_name": "autogen",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebook/autobuild_function_calling.ipynb b/notebook/autobuild_function_calling.ipynb
new file mode 100644
index 00000000000..f414de4b84a
--- /dev/null
+++ b/notebook/autobuild_function_calling.ipynb
@@ -0,0 +1,482 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# AutoBuild Agents function calling\n",
+ "By: [Krishna Shedbalkar](https://github.com/krishnashed/)\n",
+ "\n",
+ "In this notebook, we introduce a way for Agents created using `Autobuild` to do function calling. Developers can specify a function, function name and function description which will thereafter be assigned and executed by the most suitable agent created using AutoBuild."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Requirement\n",
+ "\n",
+ "AutoBuild require `pyautogen[autobuild]`, which can be installed by the following command:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install pyautogen[autobuild]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 1: Prepare configuration and some useful functions\n",
+ "\n",
+ "Prepare a `config_file_or_env` for assistant agent to limit the choice of LLM you want to use in this task. This config can be a path of json file or a name of environment variable. A `default_llm_config` is also required for initialize the specific config of LLMs like seed, temperature, etc. Preventing UserProxy agent being called multiple times by adding `allow_repeat_speaker=agent_list[:-1]`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import autogen\n",
+ "from autogen.agentchat.contrib.agent_builder import AgentBuilder\n",
+ "\n",
+ "config_file_or_env = \"OAI_CONFIG_LIST\"\n",
+ "config_list = autogen.config_list_from_json(config_file_or_env, filter_dict={\"model\": [\"gpt-4-1106-preview\", \"gpt-4\"]})\n",
+ "llm_config = {\n",
+ " \"config_list\": config_list,\n",
+ " \"timeout\": 120,\n",
+ "}\n",
+ "\n",
+ "\n",
+ "def start_task(execution_task: str, agent_list: list):\n",
+ " group_chat = autogen.GroupChat(agents=agent_list, messages=[], allow_repeat_speaker=agent_list[:-1], max_round=12)\n",
+ " manager = autogen.GroupChatManager(groupchat=group_chat, llm_config={\"config_list\": config_list})\n",
+ " agent_list[0].initiate_chat(manager, message=execution_task)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 2: Create a AgentBuilder\n",
+ "\n",
+ "Create a `AgentBuilder` with the specified `config_path_or_env`. AgentBuilder will use `gpt-4` in default to complete the whole process, you can specify the `builder_model` and `agent_model` to other OpenAI model to match your task. You can also specify an open-source LLM supporting by vLLM and FastChat, see blog for more details."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "builder = AgentBuilder(\n",
+ " config_file_or_env=config_file_or_env, builder_model=\"gpt-4-1106-preview\", agent_model=\"gpt-4-1106-preview\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 3: Specify a building task\n",
+ "\n",
+ "Specify a building task with a general description. Building task will help build manager (a LLM) decide what agents should be built."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "building_task = \"Analyze and list the trending topics in arxiv papers related to GPT-4\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 4: Define functions\n",
+ "\n",
+ "Define functions to be executed by the Agents of AutoBuild, further specify details like `name`, `description` and `function` of all the functions in an array called `list_of_functions` which will be passed to `builder.build()`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time\n",
+ "from datetime import datetime, timedelta\n",
+ "from typing import Dict\n",
+ "\n",
+ "import feedparser\n",
+ "\n",
+ "\n",
+ "def get_arxiv_paper_from_a_week(search_topic: str) -> Dict:\n",
+ " # arXiv API endpoint\n",
+ " url = \"http://export.arxiv.org/api/query?\"\n",
+ "\n",
+ " # Search parameters\n",
+ " max_results = 10\n",
+ "\n",
+ " query = (\n",
+ " f\"{url}search_query=all:{search_topic}&max_results={max_results}&sortBy=lastUpdatedDate&sortOrder=descending\"\n",
+ " )\n",
+ "\n",
+ " # Parse the feed\n",
+ " feed = feedparser.parse(query)\n",
+ "\n",
+ " now = datetime.now()\n",
+ " week_ago = now - timedelta(weeks=1)\n",
+ "\n",
+ " papers = []\n",
+ "\n",
+ " # Get papers from last week\n",
+ " for entry in feed.entries:\n",
+ " published_time = datetime.strptime(entry.published, \"%Y-%m-%dT%H:%M:%SZ\")\n",
+ " if published_time > week_ago:\n",
+ " list_of_authors = \", \".join(author.name for author in entry.authors)\n",
+ "\n",
+ " papers.append(\n",
+ " {\n",
+ " \"title\": entry.title,\n",
+ " \"authors\": list_of_authors,\n",
+ " \"published_on\": time.strftime(\"%B %d, %Y\", entry.published_parsed),\n",
+ " \"summary\": entry.summary,\n",
+ " \"link\": entry.link,\n",
+ " }\n",
+ " )\n",
+ "\n",
+ " return papers\n",
+ "\n",
+ "\n",
+ "list_of_functions = [\n",
+ " {\n",
+ " \"name\": \"get_arxiv_paper_from_a_week\",\n",
+ " \"description\": \"Get arxiv papers published in last week\",\n",
+ " \"function\": get_arxiv_paper_from_a_week,\n",
+ " }\n",
+ "]"
+ ]
+ },
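+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check, the function can also be called directly before handing it to the agents. A minimal sketch, assuming network access to the arXiv API:\n",
+    "\n",
+    "```python\n",
+    "papers = get_arxiv_paper_from_a_week(\"GPT-4\")\n",
+    "print(len(papers), \"papers published in the last week\")\n",
+    "```"
+   ]
+  },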
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 5: build group chat agents\n",
+ "\n",
+ "Use `build()` to let build manager (the specified `builder_model`) complete the group chat agents generation. Specify `list_of_functions` to be used by the Agents"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[32m==> Generating agents...\u001b[0m\n",
+ "['NLP_Expert', 'DataAnalysis_Expert', 'AIResearch_Expert'] are generated.\n",
+ "\u001b[32m==> Generating system message...\u001b[0m\n",
+ "Preparing system message for NLP_Expert\n",
+ "Preparing system message for DataAnalysis_Expert\n",
+ "Preparing system message for AIResearch_Expert\n",
+ "\u001b[32m==> Generating description...\u001b[0m\n",
+ "Preparing description for NLP_Expert\n",
+ "Preparing description for DataAnalysis_Expert\n",
+ "Preparing description for AIResearch_Expert\n",
+ "\u001b[32m==> Creating agents...\u001b[0m\n",
+ "Creating agent NLP_Expert...\n",
+ "Creating agent DataAnalysis_Expert...\n",
+ "Creating agent AIResearch_Expert...\n",
+ "Adding user console proxy...\n",
+ "Function get_arxiv_paper_from_a_week is registered to agent DataAnalysis_Expert.\n"
+ ]
+ }
+ ],
+ "source": [
+ "agent_list, agent_configs = builder.build(building_task, llm_config, list_of_functions, max_agents=3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here you can see that Function `exec_python` has been associated with `ArxivAPI_Expert` Agent."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 6: execute task\n",
+ "\n",
+ "Let agents generated in `build()` to complete the task collaboratively in a group chat."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mNLP_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Analyze and list the trending topics in arxiv papers related to GPT-4\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[32m\n",
+ "Next speaker: DataAnalysis_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mDataAnalysis_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_hkKs7wbCyAOMkC4QjOYMLgtd): get_arxiv_paper_from_a_week *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\"search_topic\":\"GPT-4\"}\n",
+ "\u001b[32m********************************************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: NLP_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION get_arxiv_paper_from_a_week...\u001b[0m\n",
+ "\u001b[33mNLP_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[33mNLP_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_hkKs7wbCyAOMkC4QjOYMLgtd) *****\u001b[0m\n",
+ "[{\"title\": \"Self-Training with Direct Preference Optimization Improves\\n Chain-of-Thought Reasoning\", \"authors\": \"Tianduo Wang, Shichen Li, Wei Lu\", \"published_on\": \"July 25, 2024\", \"summary\": \"Effective training of language models (LMs) for mathematical reasoning tasks\\ndemands high-quality supervised fine-tuning data. Besides obtaining annotations\\nfrom human experts, a common alternative is sampling from larger and more\\npowerful LMs. However, this knowledge distillation approach can be costly and\\nunstable, particularly when relying on closed-source, proprietary LMs like\\nGPT-4, whose behaviors are often unpredictable. In this work, we demonstrate\\nthat the reasoning abilities of small-scale LMs can be enhanced through\\nself-training, a process where models learn from their own outputs. We also\\nshow that the conventional self-training can be further augmented by a\\npreference learning algorithm called Direct Preference Optimization (DPO). By\\nintegrating DPO into self-training, we leverage preference data to guide LMs\\ntowards more accurate and diverse chain-of-thought reasoning. We evaluate our\\nmethod across various mathematical reasoning tasks using different base models.\\nOur experiments show that this approach not only improves LMs' reasoning\\nperformance but also offers a more cost-effective and scalable solution\\ncompared to relying on large proprietary LMs.\", \"link\": \"http://arxiv.org/abs/2407.18248v1\"}, {\"title\": \"C2P: Featuring Large Language Models with Causal Reasoning\", \"authors\": \"Abdolmahdi Bagheri, Matin Alinejad, Kevin Bello, Alireza Akhondi-Asl\", \"published_on\": \"July 25, 2024\", \"summary\": \"Causal reasoning is the primary bottleneck that Large Language Models (LLMs)\\nmust overcome to attain human-level intelligence. To address this, we introduce\\nthe Causal Chain of Prompting (C2P) as the first reasoning framework that\\nequips current LLMs with causal reasoning capabilities. C2P operates\\nautonomously, avoiding reliance on external tools or modules during both the\\ncausal learning and reasoning phases, and can be seamlessly implemented during\\nthe training or fine-tuning of LLMs. Experimental results across various\\nbenchmark datasets demonstrate a significant improvement in causal learning and\\nsubsequent reasoning accuracy of LLMs. We illustrate how C2P enhances LLMs'\\nability to causally reason in real-world scenarios, addressing complex problems\\nin fields such as healthcare, medicine, economics, education, social sciences,\\nenvironmental science, and marketing. 
With few-shot learning, GPT-4 Turbo using\\nC2P with as few as six examples achieves significant performance improvements,\\nboasting over a 33% increase in reasoning accuracy over the most\\nstate-of-the-art LLMs, which perform nearly randomly in similar circumstances.\\nThis demonstrates the transformative potential of integrating C2P into LLM\\ntraining or fine-tuning processes, thereby empowering these models with\\nadvanced causal reasoning capabilities.\", \"link\": \"http://arxiv.org/abs/2407.18069v1\"}, {\"title\": \"Is the Digital Forensics and Incident Response Pipeline Ready for\\n Text-Based Threats in LLM Era?\", \"authors\": \"Avanti Bhandarkar, Ronald Wilson, Anushka Swarup, Mengdi Zhu, Damon Woodard\", \"published_on\": \"July 25, 2024\", \"summary\": \"In the era of generative AI, the widespread adoption of Neural Text\\nGenerators (NTGs) presents new cybersecurity challenges, particularly within\\nthe realms of Digital Forensics and Incident Response (DFIR). These challenges\\nprimarily involve the detection and attribution of sources behind advanced\\nattacks like spearphishing and disinformation campaigns. As NTGs evolve, the\\ntask of distinguishing between human and NTG-authored texts becomes critically\\ncomplex. This paper rigorously evaluates the DFIR pipeline tailored for\\ntext-based security systems, specifically focusing on the challenges of\\ndetecting and attributing authorship of NTG-authored texts. By introducing a\\nnovel human-NTG co-authorship text attack, termed CS-ACT, our study uncovers\\nsignificant vulnerabilities in traditional DFIR methodologies, highlighting\\ndiscrepancies between ideal scenarios and real-world conditions. Utilizing 14\\ndiverse datasets and 43 unique NTGs, up to the latest GPT-4, our research\\nidentifies substantial vulnerabilities in the forensic profiling phase,\\nparticularly in attributing authorship to NTGs. Our comprehensive evaluation\\npoints to factors such as model sophistication and the lack of distinctive\\nstyle within NTGs as significant contributors for these vulnerabilities. Our\\nfindings underscore the necessity for more sophisticated and adaptable\\nstrategies, such as incorporating adversarial learning, stylizing NTGs, and\\nimplementing hierarchical attribution through the mapping of NTG lineages to\\nenhance source attribution. This sets the stage for future research and the\\ndevelopment of more resilient text-based security systems.\", \"link\": \"http://arxiv.org/abs/2407.17870v1\"}, {\"title\": \"Cost-effective Instruction Learning for Pathology Vision and Language\\n Analysis\", \"authors\": \"Kaitao Chen, Mianxin Liu, Fang Yan, Lei Ma, Xiaoming Shi, Lilong Wang, Xiaosong Wang, Lifeng Zhu, Zhe Wang, Mu Zhou, Shaoting Zhang\", \"published_on\": \"July 25, 2024\", \"summary\": \"The advent of vision-language models fosters the interactive conversations\\nbetween AI-enabled models and humans. Yet applying these models into clinics\\nmust deal with daunting challenges around large-scale training data, financial,\\nand computational resources. Here we propose a cost-effective instruction\\nlearning framework for conversational pathology named as CLOVER. CLOVER only\\ntrains a lightweight module and uses instruction tuning while freezing the\\nparameters of the large language model. Instead of using costly GPT-4, we\\npropose well-designed prompts on GPT-3.5 for building generation-based\\ninstructions, emphasizing the utility of pathological knowledge derived from\\nthe Internet source. 
To augment the use of instructions, we construct a\\nhigh-quality set of template-based instructions in the context of digital\\npathology. From two benchmark datasets, our findings reveal the strength of\\nhybrid-form instructions in the visual question-answer in pathology. Extensive\\nresults show the cost-effectiveness of CLOVER in answering both open-ended and\\nclosed-ended questions, where CLOVER outperforms strong baselines that possess\\n37 times more training parameters and use instruction data generated from\\nGPT-4. Through the instruction tuning, CLOVER exhibits robustness of few-shot\\nlearning in the external clinical dataset. These findings demonstrate that\\ncost-effective modeling of CLOVER could accelerate the adoption of rapid\\nconversational applications in the landscape of digital pathology.\", \"link\": \"http://arxiv.org/abs/2407.17734v1\"}, {\"title\": \"My Ontologist: Evaluating BFO-Based AI for Definition Support\", \"authors\": \"Carter Benson, Alec Sculley, Austin Liebers, John Beverley\", \"published_on\": \"July 24, 2024\", \"summary\": \"Generative artificial intelligence (AI), exemplified by the release of\\nGPT-3.5 in 2022, has significantly advanced the potential applications of large\\nlanguage models (LLMs), including in the realms of ontology development and\\nknowledge graph creation. Ontologies, which are structured frameworks for\\norganizing information, and knowledge graphs, which combine ontologies with\\nactual data, are essential for enabling interoperability and automated\\nreasoning. However, current research has largely overlooked the generation of\\nontologies extending from established upper-level frameworks like the Basic\\nFormal Ontology (BFO), risking the creation of non-integrable ontology silos.\\nThis study explores the extent to which LLMs, particularly GPT-4, can support\\nontologists trained in BFO. Through iterative development of a specialized GPT\\nmodel named \\\"My Ontologist,\\\" we aimed to generate BFO-conformant ontologies.\\nInitial versions faced challenges in maintaining definition conventions and\\nleveraging foundational texts effectively. My Ontologist 3.0 showed promise by\\nadhering to structured rules and modular ontology suites, yet the release of\\nGPT-4o disrupted this progress by altering the model's behavior. Our findings\\nunderscore the importance of aligning LLM-generated ontologies with top-level\\nstandards and highlight the complexities of integrating evolving AI\\ncapabilities in ontology engineering.\", \"link\": \"http://arxiv.org/abs/2407.17657v1\"}, {\"title\": \"Can GPT-4 learn to analyze moves in research article abstracts?\", \"authors\": \"Danni Yu, Marina Bondi, Ken Hyland\", \"published_on\": \"July 22, 2024\", \"summary\": \"One of the most powerful and enduring ideas in written discourse analysis is\\nthat genres can be described in terms of the moves which structure a writer's\\npurpose. Considerable research has sought to identify these distinct\\ncommunicative acts, but analyses have been beset by problems of subjectivity,\\nreliability and the time-consuming need for multiple coders to confirm\\nanalyses. In this paper we employ the affordances of GPT-4 to automate the\\nannotation process by using natural language prompts. Focusing on abstracts\\nfrom articles in four applied linguistics journals, we devise prompts which\\nenable the model to identify moves effectively. 
The annotated outputs of these\\nprompts were evaluated by two assessors with a third addressing disagreements.\\nThe results show that an 8-shot prompt was more effective than one using two,\\nconfirming that the inclusion of examples illustrating areas of variability can\\nenhance GPT-4's ability to recognize multiple moves in a single sentence and\\nreduce bias related to textual position. We suggest that GPT-4 offers\\nconsiderable potential in automating this annotation process, when human actors\\nwith domain specific linguistic expertise inform the prompting process.\", \"link\": \"http://arxiv.org/abs/2407.15612v2\"}, {\"title\": \"I Could've Asked That: Reformulating Unanswerable Questions\", \"authors\": \"Wenting Zhao, Ge Gao, Claire Cardie, Alexander M. Rush\", \"published_on\": \"July 24, 2024\", \"summary\": \"When seeking information from unfamiliar documents, users frequently pose\\nquestions that cannot be answered by the documents. While existing large\\nlanguage models (LLMs) identify these unanswerable questions, they do not\\nassist users in reformulating their questions, thereby reducing their overall\\nutility. We curate CouldAsk, an evaluation benchmark composed of existing and\\nnew datasets for document-grounded question answering, specifically designed to\\nstudy reformulating unanswerable questions. We evaluate state-of-the-art\\nopen-source and proprietary LLMs on CouldAsk. The results demonstrate the\\nlimited capabilities of these models in reformulating questions. Specifically,\\nGPT-4 and Llama2-7B successfully reformulate questions only 26% and 12% of the\\ntime, respectively. Error analysis shows that 62% of the unsuccessful\\nreformulations stem from the models merely rephrasing the questions or even\\ngenerating identical questions. We publicly release the benchmark and the code\\nto reproduce the experiments.\", \"link\": \"http://arxiv.org/abs/2407.17469v1\"}]\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: DataAnalysis_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mDataAnalysis_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "After reviewing the recent arXiv papers related to GPT-4, the following trending topics have been identified:\n",
+ "\n",
+ "1. **Chain-of-Thought Reasoning and Self-Training**: One study proposes self-training methods, coupled with Direct Preference Optimization (DPO), to improve chain-of-thought reasoning in smaller language models, addressing the limitations of depending on proprietary large language models like GPT-4 for knowledge distillation ([source](http://arxiv.org/abs/2407.18248v1)).\n",
+ "\n",
+ "2. **Causal Reasoning in LLMs**: Another research introduces the Causal Chain of Prompting (C2P) framework designed to equip large language models with causal reasoning capabilities. The study shows that this framework, when used with GPT-4, achieves significant performance improvements in various practical scenarios ([source](http://arxiv.org/abs/2407.18069v1)).\n",
+ "\n",
+ "3. **Digital Forensics and Text-Based Security Threats**: This paper evaluates how well the Digital Forensics and Incident Response (DFIR) pipeline can handle text-based threats in the LLM era, specifically in the context of detecting and attributing authorship to texts generated by neural text generators like GPT-4 ([source](http://arxiv.org/abs/2407.17870v1)).\n",
+ "\n",
+ "4. **Cost-Effective Instruction Learning**: A research team discusses a new cost-effective instruction learning framework for conversational pathology named CLOVER, which leverages well-designed prompts on GPT-3.5, demonstrating that savings can be made on computation and financial resources while applying language models in a clinical setting ([source](http://arxiv.org/abs/2407.17734v1)).\n",
+ "\n",
+ "5. **Ontology Generation and AI Alignment**: An investigation into the feasibility of GPT-4 supporting ontology development based on the Basic Formal Ontology (BFO). The findings suggest challenges in aligning the rapidly evolving capabilities of LLMs with well-structured ontology standards ([source](http://arxiv.org/abs/2407.17657v1)).\n",
+ "\n",
+ "6. **Automating Annotation Processes in Discourse Analysis**: Researchers employ GPT-4's capacity to automate the annotation of communicative moves in written discourse, using natural language prompts to identify structural elements in academic abstracts more effectively ([source](http://arxiv.org/abs/2407.15612v2)).\n",
+ "\n",
+ "7. **Reformulating Unanswerable Questions**: Another study creates a benchmark for evaluating LLMs' performance in reformulating unanswerable questions. The findings indicate that even state-of-the-art models like GPT-4 struggle with this task, often failing to significantly alter the original unanswerable question ([source](http://arxiv.org/abs/2407.17469v1)).\n",
+ "\n",
+ "These topics illustrate the varied applications of GPT-4 and highlight the research community's focus on enhancing reasoning, security, cost-effectiveness, and interoperability within LLMs. Additionally, the challenges of annotation and question reformulation show that there is still room for improvement in understanding and processing natural language through AI.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: AIResearch_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mAIResearch_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Based on the synthesized information provided by the DataAnalysis_Expert, the current trending topics for GPT-4 research seem to cover a spectrum of cognitive and practical applications: from enhancing reasoning capabilities, improving training methodologies, tackling new cybersecurity issues, to the more nuanced tasks of ontology engineering and discourse annotation.\n",
+ "\n",
+ "A future direction that seems particularly pertinent is investigating the interplay between GPT-4's abilities and human intelligence. For example, how GPT-4 can support specialized professionals in tasks that require highly structured knowledge, such as legal document analysis, medical diagnosis, or engineering design. There's scope to explore how the communication between GPT-4 and humans could be streamlined for cooperative problem-solving.\n",
+ "\n",
+ "Another promising direction is delving into the area of affective computing: understanding emotions, sarcasm, and subtleties in text to improve human-computer interactions. This can lead to breakthrough applications in personalized digital assistants, education (tailored student feedback), and mental health (empathetic conversational agents).\n",
+ "\n",
+ "Finally, with the mentioned challenges in reformulation of unanswerable questions and ontology alignment, extensive research focused on understanding the limitations of GPT-4's language comprehension could result in more nuanced teaching mechanisms for AI, leading to more reliable autonomous decision-making applications.\n",
+ "\n",
+ "Would the NLP_Expert or DataAnalysis_Expert like to weigh in on some applications or future research directions specifically leveraging linguistic or data-driven nuances that GPT-4 may be capable of addressing?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: NLP_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mNLP_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Sure, I'd like to add some insights from the perspective of NLP.\n",
+ "\n",
+ "The field of representational learning is particularly important as we continue to seek ways to improve GPT-4's performance. More sophisticated embedding methods could capture nuanced semantic relationships, make fine-grained distinctions among synonyms, handle polysemy more effectively, and distinguish subtle connotations—a direction that might enhance GPT-4's capabilities in tasks like sentiment analysis, irony detection, and humor recognition.\n",
+ "\n",
+ "In terms of practical applications, one future direction lies in the domain of legal and ethical reasoning. As systems like GPT-4 are leveraged for legal research and analysis, it will be essential to teach the model to navigate complex ethical considerations and the nuanced language of legal literature. This could involve training on specialized datasets that include case law and legal precedence, as well as philosophical texts dealing with ethics.\n",
+ "\n",
+ "Moreover, the trend towards cross-domain applications — such as pathology vision-language analysis mentioned in the papers — suggests that GPT-4 could be tailored to handle multi-modal inputs more effectively. Here the interaction between visual data and textual information requires further exploration, particularly in the way that GPT-4 processes and generates language grounded in non-textual contexts.\n",
+ "\n",
+ "Lastly, the recurring theme of cost-effectiveness in model training opens up a broader discussion about sustainable AI practices. Finding ways to reduce the data, energy, and computational resources necessary for training and deploying models like GPT-4 isn't just a technical challenge, but also an environmental and economic one. This includes research into more efficient algorithms and architectures, as well as methodologies for leveraging smaller models to approximate the capabilities of more substantial systems.\n",
+ "\n",
+ "There are indeed a myriad of pathways for future research, all of which underscore the evolving nature of NLP and its entwinement with the continually advancing capabilities of models like GPT-4. Any further explorations or examinations on these topics from the group?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: MachineLearning_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mMachineLearning_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Considering the points made by the NLP_Expert and AIResearch_Expert, I'd like to contribute a take on how we might further the state-of-the-art in machine learning leveraging the capabilities of GPT-4.\n",
+ "\n",
+ "For one, the convergence of unsupervised, semi-supervised, and supervised learning methodologies might be beneficial for GPT-4's future iterations or similar models. The incorporation of unsupervised learning can help in better understanding context and meaning without substantial labeled datasets. This could improve GPT-4's potential in low-resource languages and niche applications where labeled data is scarce.\n",
+ "\n",
+ "Another realm of exploration could be few-shot and one-shot learning. As models become more capable, their ability to generalize from fewer examples is critical. Fine-tuning GPT-4's few-shot learning capabilities can have practical implications in personalized AI services where the model needs to adapt quickly to individual user needs and preferences.\n",
+ "\n",
+ "Additionally, given the recent trends in GPT-4 research, the incorporation of neuro-symbolic approaches may offer a promising avenue for enhancing reasoning capabilities. This hybrid approach combines neural networks' learning prowess with symbolic AI's rule-based reasoning - providing a pathway to improve GPT-4's causal reasoning, problem-solving, and comprehension of complex systems.\n",
+ "\n",
+ "Finally, as we progress toward AI models that comprehend and generate human-like text, the ethical dimension becomes paramount. It's crucial for future research to focus on models' accountability, interpretability, and fairness. By building robust mechanisms for transparency and control, we can ensure that advancements in GPT-4 and similar AI are developed responsibly and augment human abilities without unintended consequences.\n",
+ "\n",
+ "If the group agrees, these threads can weave together a forward-looking agenda for research in machine learning, focusing on resource efficiency, learning dexterity, cognitive depth, and ethical robustness. Are we in consensus to proceed on finalizing our analysis and concluding our discussion?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: AI_Research_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mAI_Research_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "The interdisciplinary perspectives presented here create a comprehensive view of the potential for GPT-4 and its related research domains. We have touched upon cognitive enhancements in machine reasoning, representational learning, cross-domain applications, sustainable AI, few-shot learning, neuro-symbolic approaches, and ethical considerations in AI development.\n",
+ "\n",
+ "As an AI Research Expert, I second the synthesis of these insights and propose that our analysis has reached a natural conclusion with consensus on the future directions and implications of GPT-4's continuing evolution within the AI landscape.\n",
+ "\n",
+ "If there are no additional insights or questions from the group, I suggest we may consider our task complete. Shall we proceed to close our discussion?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Computer_terminal\n",
+ "\u001b[0m\n",
+ "\u001b[33mComputer_terminal\u001b[0m (to chat_manager):\n",
+ "\n",
+ "There is no code from the last 1 message for me to execute. Group chat manager should let other participants to continue the conversation. If the group chat manager want to end the conversation, you should let other participant reply me only with \"TERMINATE\"\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: AI_Research_Expert\n",
+ "\u001b[0m\n",
+ "\u001b[33mAI_Research_Expert\u001b[0m (to chat_manager):\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "start_task(execution_task=building_task, agent_list=agent_list)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Step 7 (Optional): clear all agents and prepare for the next task\n",
+ "\n",
+ "You can clear all agents generated in this task by the following code if your task is completed or the next task is largely different from the current task. If the agent's backbone is an open-source LLM, this process will also shut down the endpoint server. If necessary, you can use `recycle_endpoint=False` to retain the previous open-source LLMs' endpoint server."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mAll agents have been cleared.\u001b[0m\n"
+ ]
+ }
+ ],
+ "source": [
+ "builder.clear_all_agents(recycle_endpoint=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Save & load configs\n",
+ "\n",
+ "You can save all necessary information of the built group chat agents. Here is a case for those agents generated in the above task:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[32mBuilding config saved to ./save_config_8e0d96e24673563ecb572d92ed003d2a.json\u001b[0m\n"
+ ]
+ }
+ ],
+ "source": [
+ "saved_path = builder.save()"
+ ]
+ },
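+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can rebuild the same group of agents from the saved building config instead of re-running the builder. A minimal sketch, assuming `builder.load` accepts the JSON path returned by `builder.save()` (this behavior is exercised by the project's `test_load` test):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Rebuild agents from the saved building config file.\n",
+ "# `saved_path` is the path returned by `builder.save()` above.\n",
+ "agent_list, agent_configs = builder.load(saved_path)"
+ ]
+ },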
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.14"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "e7370f93d1d0cde622a1f8e1c04877d8463912d04d973331ad4851f04de6915a"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
diff --git a/setup.py b/setup.py
index fe55a4a6c2e..dfc5e912779 100644
--- a/setup.py
+++ b/setup.py
@@ -107,6 +107,7 @@
"cohere": ["cohere>=5.5.8"],
"ollama": ["ollama>=0.3.3", "fix_busted_json>=0.0.18"],
"bedrock": ["boto3>=1.34.149"],
+ "kubernetes": ["kubernetes>=27.2.0"],
}
setuptools.setup(
diff --git a/test/agentchat/contrib/test_agent_builder.py b/test/agentchat/contrib/test_agent_builder.py
index e2e39e8ba43..e0d8515c0fe 100755
--- a/test/agentchat/contrib/test_agent_builder.py
+++ b/test/agentchat/contrib/test_agent_builder.py
@@ -3,15 +3,18 @@
import json
import os
import sys
+from unittest.mock import MagicMock, patch
import pytest
+import autogen
from autogen.agentchat.contrib.agent_builder import AgentBuilder
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
+
from conftest import reason, skip_openai # noqa: E402
-from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST  # noqa: E402
try:
import chromadb
@@ -22,6 +25,7 @@
skip = False
here = os.path.abspath(os.path.dirname(__file__))
+llm_config = {"temperature": 0}
def _config_check(config):
@@ -37,10 +41,27 @@ def _config_check(config):
assert agent_config.get("system_message", None) is not None
-@pytest.mark.skipif(
- skip_openai,
- reason=reason,
-)
+# Initializes a group chat with the given agents and starts an execution task.
+def start_task(execution_task: str, agent_list: list):
+ group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12)
+ manager = autogen.GroupChatManager(
+ groupchat=group_chat,
+ llm_config={"config_list": autogen.config_list_from_json(f"{KEY_LOC}/{OAI_CONFIG_LIST}"), **llm_config},
+ )
+
+ agent_list[0].initiate_chat(manager, message=execution_task)
+
+
+ask_ossinsight_mock = MagicMock()
+
+
+# Function to test function calling
+def ask_ossinsight(question: str) -> str:
+ ask_ossinsight_mock(question)
+ return "The repository microsoft/autogen has 123,456 stars on GitHub."
+
+
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_build():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -69,6 +90,99 @@ def test_build():
assert len(agent_config["agent_configs"]) <= builder.max_agents
+@pytest.mark.skipif(skip_openai or skip, reason=reason + "OR dependency not installed")
+def test_build_assistant_with_function_calling():
+ list_of_functions = [
+ {
+ "name": "ossinsight_data_api",
+ "description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the related and structured data.",
+ "function": ask_ossinsight,
+ }
+ ]
+
+ builder = AgentBuilder(
+ config_file_or_env=OAI_CONFIG_LIST, config_file_location=KEY_LOC, builder_model="gpt-4", agent_model="gpt-4"
+ )
+ building_task = "How many stars microsoft/autogen has on GitHub?"
+
+ agent_list, agent_config = builder.build(
+ building_task=building_task,
+ default_llm_config={"temperature": 0},
+ code_execution_config={
+ "last_n_messages": 2,
+ "work_dir": f"{here}/test_agent_scripts",
+ "timeout": 60,
+ "use_docker": "python:3",
+ },
+ list_of_functions=list_of_functions,
+ )
+
+ _config_check(agent_config)
+
+ # check number of agents
+ assert len(agent_config["agent_configs"]) <= builder.max_agents
+
+    # Mock the 'ask_ossinsight' function in the current module using a context manager.
+ with patch(f"{__name__}.ask_ossinsight") as mocked_function:
+ # Execute 'start_task' which should trigger 'ask_ossinsight' due to the given execution task.
+ start_task(
+ execution_task="How many stars microsoft/autogen has on GitHub?",
+ agent_list=agent_list,
+ )
+
+        # Verify that 'ask_ossinsight' was called during the task execution.
+ mocked_function.assert_called()
+
+
+@pytest.mark.skipif(skip_openai, reason=reason)
+def test_build_gpt_assistant_with_function_calling():
+ list_of_functions = [
+ {
+ "name": "ossinsight_data_api",
+ "description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the related and structured data.",
+ "function": ask_ossinsight,
+ }
+ ]
+
+ builder = AgentBuilder(
+ config_file_or_env=OAI_CONFIG_LIST, config_file_location=KEY_LOC, builder_model="gpt-4", agent_model="gpt-4"
+ )
+
+ building_task = "Determine number of stars of GitHub repositories"
+
+ agent_list, agent_config = builder.build(
+ building_task=building_task,
+ default_llm_config={"temperature": 0},
+ code_execution_config={
+ "last_n_messages": 2,
+ "work_dir": f"{here}/test_agent_scripts",
+ "timeout": 60,
+ "use_docker": "python:3",
+ },
+ list_of_functions=list_of_functions,
+ use_oai_assistant=True,
+ )
+
+ _config_check(agent_config)
+
+ # check number of agents
+ assert len(agent_config["agent_configs"]) <= builder.max_agents
+
+    # Mock the 'ask_ossinsight' function in the current module using a context manager.
+ with patch(f"{__name__}.ask_ossinsight") as mocked_function:
+ # Execute 'start_task' which should trigger 'ask_ossinsight' due to the given execution task.
+ start_task(
+ execution_task="How many stars microsoft/autogen has on GitHub?",
+ agent_list=agent_list,
+ )
+
+        # Verify that 'ask_ossinsight' was called during the task execution.
+ mocked_function.assert_called()
+
+
@pytest.mark.skipif(
skip_openai or skip,
reason=reason + "OR dependency not installed",
@@ -122,10 +236,7 @@ def test_build_from_library():
assert len(agent_config["agent_configs"]) <= builder.max_agents
-@pytest.mark.skipif(
- skip_openai,
- reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_save():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -159,10 +270,7 @@ def test_save():
_config_check(saved_configs)
-@pytest.mark.skipif(
- skip_openai,
- reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_load():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -188,10 +296,7 @@ def test_load():
_config_check(loaded_agent_configs)
-@pytest.mark.skipif(
- skip_openai,
- reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_clear_agent():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -218,6 +323,8 @@ def test_clear_agent():
if __name__ == "__main__":
test_build()
+ test_build_assistant_with_function_calling()
+ test_build_gpt_assistant_with_function_calling()
test_build_from_library()
test_save()
test_load()
diff --git a/test/coding/test_kubernetes.commandline_code_executor.md b/test/coding/test_kubernetes.commandline_code_executor.md
new file mode 100644
index 00000000000..ad9348dc600
--- /dev/null
+++ b/test/coding/test_kubernetes.commandline_code_executor.md
@@ -0,0 +1,44 @@
+# Test Environment for autogen.coding.kubernetes.PodCommandLineCodeExecutor
+
+To test PodCommandLineCodeExecutor, the following environment is required.
+- kubernetes cluster config file
+- autogen package
+
+## kubernetes cluster config file
+
+The kubeconfig file's location should be set in the `KUBECONFIG` environment variable, or
+the file must be located at `.kube/config` in your home directory.
+
+For Windows, this is `C:\Users\<username>\.kube\config`;
+for Linux or macOS, place the kubeconfig file at `~/.kube/config`.
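+
+For example, to point the tests at a kubeconfig stored in a non-default location (the path below is illustrative):
+
+```sh
+export KUBECONFIG=/path/to/kubeconfig
+```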
+
+## package install
+
+Clone the autogen GitHub repository for package installation and testing.
+
+Clone the repository with one of the commands below.
+
+Before the contribution is merged:
+```sh
+git clone -b k8s-code-executor https://github.com/questcollector/autogen.git
+```
+
+After the contribution is merged:
+```sh
+git clone https://github.com/microsoft/autogen.git
+```
+
+Install autogen with `kubernetes>=27.2.0` (matching the `kubernetes` extra in `setup.py`):
+
+```sh
+cd autogen
+pip install .[kubernetes] -U
+```
+
+## test execution
+
+Run the test with the following command:
+
+```sh
+pytest test/coding/test_kubernetes_commandline_code_executor.py
+```
diff --git a/test/coding/test_kubernetes_commandline_code_executor.py b/test/coding/test_kubernetes_commandline_code_executor.py
new file mode 100644
index 00000000000..09e6b36aafa
--- /dev/null
+++ b/test/coding/test_kubernetes_commandline_code_executor.py
@@ -0,0 +1,203 @@
+import importlib
+import os
+import sys
+from pathlib import Path
+
+import pytest
+
+from autogen.code_utils import TIMEOUT_MSG
+from autogen.coding.base import CodeBlock, CodeExecutor
+
+try:
+ from autogen.coding.kubernetes import PodCommandLineCodeExecutor
+
+ client = importlib.import_module("kubernetes.client")
+ config = importlib.import_module("kubernetes.config")
+
+ kubeconfig = Path(".kube/config")
+ if os.environ.get("KUBECONFIG", None):
+ kubeconfig = Path(os.environ["KUBECONFIG"])
+ elif sys.platform == "win32":
+ kubeconfig = os.environ["userprofile"] / kubeconfig
+ else:
+ kubeconfig = os.environ["HOME"] / kubeconfig
+
+ if kubeconfig.is_file():
+ config.load_config(config_file=str(kubeconfig))
+ api_client = client.CoreV1Api()
+ api_client.list_namespace()
+ skip_kubernetes_tests = False
+ else:
+ skip_kubernetes_tests = True
+
+ pod_spec = client.V1Pod(
+ metadata=client.V1ObjectMeta(
+ name="abcd", namespace="default", annotations={"sidecar.istio.io/inject": "false"}
+ ),
+ spec=client.V1PodSpec(
+ restart_policy="Never",
+ containers=[
+ client.V1Container(
+ args=["-c", "while true;do sleep 5; done"],
+ command=["/bin/sh"],
+ name="abcd",
+ image="python:3.11-slim",
+ env=[
+ client.V1EnvVar(name="TEST", value="TEST"),
+ client.V1EnvVar(
+ name="POD_NAME",
+ value_from=client.V1EnvVarSource(
+ field_ref=client.V1ObjectFieldSelector(field_path="metadata.name")
+ ),
+ ),
+ ],
+ )
+ ],
+ ),
+ )
+except Exception:
+ skip_kubernetes_tests = True
+
+
+@pytest.mark.skipif(skip_kubernetes_tests, reason="kubernetes not accessible")
+def test_create_default_pod_executor():
+ with PodCommandLineCodeExecutor(namespace="default", kube_config_file=str(kubeconfig)) as executor:
+ assert executor.timeout == 60
+ assert executor.work_dir == Path("/workspace")
+ assert executor._container_name == "autogen-code-exec"
+ assert executor._pod.metadata.name.startswith("autogen-code-exec-")
+ _test_execute_code(executor)
+
+
+@pytest.mark.skipif(skip_kubernetes_tests, reason="kubernetes not accessible")
+def test_create_node_pod_executor():
+ with PodCommandLineCodeExecutor(
+ image="node:22-alpine",
+ namespace="default",
+ work_dir="./app",
+ timeout=30,
+ kube_config_file=str(kubeconfig),
+ execution_policies={"javascript": True},
+ ) as executor:
+ assert executor.timeout == 30
+ assert executor.work_dir == Path("./app")
+ assert executor._container_name == "autogen-code-exec"
+ assert executor._pod.metadata.name.startswith("autogen-code-exec-")
+ assert executor.execution_policies["javascript"]
+
+ # Test single code block.
+ code_blocks = [CodeBlock(code="console.log('hello world!')", language="javascript")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert code_result.exit_code == 0 and "hello world!" in code_result.output and code_result.code_file is not None
+
+ # Test multiple code blocks.
+ code_blocks = [
+ CodeBlock(code="console.log('hello world!')", language="javascript"),
+ CodeBlock(code="let a = 100 + 100; console.log(a)", language="javascript"),
+ ]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert (
+ code_result.exit_code == 0
+ and "hello world!" in code_result.output
+ and "200" in code_result.output
+ and code_result.code_file is not None
+ )
+
+ # Test running code.
+ file_lines = ["console.log('hello world!')", "let a = 100 + 100", "console.log(a)"]
+ code_blocks = [CodeBlock(code="\n".join(file_lines), language="javascript")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert (
+ code_result.exit_code == 0
+ and "hello world!" in code_result.output
+ and "200" in code_result.output
+ and code_result.code_file is not None
+ )
+
+
+@pytest.mark.skipif(skip_kubernetes_tests, reason="kubernetes not accessible")
+def test_create_pod_spec_pod_executor():
+ with PodCommandLineCodeExecutor(
+ pod_spec=pod_spec, container_name="abcd", kube_config_file=str(kubeconfig)
+ ) as executor:
+ assert executor.timeout == 60
+ assert executor._container_name == "abcd"
+ assert executor._pod.metadata.name == pod_spec.metadata.name
+ assert executor._pod.metadata.namespace == pod_spec.metadata.namespace
+ _test_execute_code(executor)
+
+ # Test bash script.
+ if sys.platform not in ["win32"]:
+ code_blocks = [CodeBlock(code="echo $TEST $POD_NAME", language="bash")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert (
+ code_result.exit_code == 0 and "TEST abcd" in code_result.output and code_result.code_file is not None
+ )
+
+
+@pytest.mark.skipif(skip_kubernetes_tests, reason="kubernetes not accessible")
+def test_pod_executor_timeout():
+ with PodCommandLineCodeExecutor(namespace="default", timeout=5, kube_config_file=str(kubeconfig)) as executor:
+ assert executor.timeout == 5
+ assert executor.work_dir == Path("/workspace")
+ assert executor._container_name == "autogen-code-exec"
+ assert executor._pod.metadata.name.startswith("autogen-code-exec-")
+ # Test running code.
+ file_lines = ["import time", "time.sleep(10)", "a = 100 + 100", "print(a)"]
+ code_blocks = [CodeBlock(code="\n".join(file_lines), language="python")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert code_result.exit_code == 124 and TIMEOUT_MSG in code_result.output and code_result.code_file is not None
+
+
+def _test_execute_code(executor: CodeExecutor) -> None:
+ # Test single code block.
+ code_blocks = [CodeBlock(code="import sys; print('hello world!')", language="python")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert code_result.exit_code == 0 and "hello world!" in code_result.output and code_result.code_file is not None
+
+ # Test multiple code blocks.
+ code_blocks = [
+ CodeBlock(code="import sys; print('hello world!')", language="python"),
+ CodeBlock(code="a = 100 + 100; print(a)", language="python"),
+ ]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert (
+ code_result.exit_code == 0
+ and "hello world!" in code_result.output
+ and "200" in code_result.output
+ and code_result.code_file is not None
+ )
+
+ # Test bash script.
+ if sys.platform not in ["win32"]:
+ code_blocks = [CodeBlock(code="echo 'hello world!'", language="bash")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert code_result.exit_code == 0 and "hello world!" in code_result.output and code_result.code_file is not None
+
+ # Test running code.
+ file_lines = ["import sys", "print('hello world!')", "a = 100 + 100", "print(a)"]
+ code_blocks = [CodeBlock(code="\n".join(file_lines), language="python")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert (
+ code_result.exit_code == 0
+ and "hello world!" in code_result.output
+ and "200" in code_result.output
+ and code_result.code_file is not None
+ )
+
+ # Test running code has filename.
+ file_lines = ["# filename: test.py", "import sys", "print('hello world!')", "a = 100 + 100", "print(a)"]
+ code_blocks = [CodeBlock(code="\n".join(file_lines), language="python")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ print(code_result.code_file)
+ assert (
+ code_result.exit_code == 0
+ and "hello world!" in code_result.output
+ and "200" in code_result.output
+ and code_result.code_file.find("test.py") > 0
+ )
+
+ # Test error code.
+ code_blocks = [CodeBlock(code="print(sys.platform)", language="python")]
+ code_result = executor.execute_code_blocks(code_blocks)
+ assert code_result.exit_code == 1 and "Traceback" in code_result.output and code_result.code_file is not None
diff --git a/website/blog/2023-10-18-RetrieveChat/index.mdx b/website/blog/2023-10-18-RetrieveChat/index.mdx
index 9a6c2378723..84bcca5b306 100644
--- a/website/blog/2023-10-18-RetrieveChat/index.mdx
+++ b/website/blog/2023-10-18-RetrieveChat/index.mdx
@@ -406,3 +406,4 @@ You can check out more example notebooks for RAG use cases:
- [Using RetrieveChat with Qdrant for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_qdrant.ipynb)
- [Using RetrieveChat Powered by PGVector for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_pgvector.ipynb)
- [Using RetrieveChat Powered by MongoDB Atlas for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_mongodb.ipynb)
+- [Using RetrieveChat Powered by Couchbase for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_couchbase.ipynb)
diff --git a/website/docs/Examples.md b/website/docs/Examples.md
index 550c2604659..df06f97fb12 100644
--- a/website/docs/Examples.md
+++ b/website/docs/Examples.md
@@ -55,6 +55,7 @@ Links to notebook examples:
- Browse the Web with Agents - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_surfer.ipynb)
- **SQL**: Natural Language Text to SQL Query using the [Spider](https://yale-lily.github.io/spider) Text-to-SQL Benchmark - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_sql_spider.ipynb)
- **Web Scraping**: Web Scraping with Apify - [View Notebook](/docs/notebooks/agentchat_webscraping_with_apify)
+- **Web Crawling**: Crawl entire domain with Spider API - [View Notebook](/docs/notebooks/agentchat_webcrawling_with_spider)
- **Write a software app, task by task, with specially designed functions.** - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_function_call_code_writing.ipynb).
### Human Involvement
diff --git a/website/docs/ecosystem/agent-memory-with-zep.md b/website/docs/ecosystem/agent-memory-with-zep.md
new file mode 100644
index 00000000000..c4551296dc7
--- /dev/null
+++ b/website/docs/ecosystem/agent-memory-with-zep.md
@@ -0,0 +1,102 @@
+# Agent Memory with Zep
+
+
+
+[Zep](https://www.getzep.com/?utm_source=autogen) is a long-term memory service for agentic applications used by both startups and enterprises. With Zep, you can build personalized, accurate, and production-ready agent applications.
+
+Zep's memory continuously learns facts from interactions with users and your changing business data. With [just two API calls](https://help.getzep.com/memory?utm_source=autogen), you can persist chat history to Zep and recall facts relevant to the state of your agent.
+
+Zep is powered by a temporal Knowledge Graph that allows reasoning with facts as they change. A combination of semantic and graph search enables accurate and low-latency fact retrieval.
+
+Sign up for [Zep Cloud](https://www.getzep.com/?utm_source=autogen) or visit the [Zep Community Edition Repo](https://github.com/getzep/zep).
+
+| Feature | Description |
+| ---------------------------------------------- | ------------------------------------------------------------------------------------- |
+| 💬 **Capture Detailed Conversational Context** | Zep's Knowledge Graph-based memory captures episodic, semantic, and temporal contexts |
+| 🗄️ **Business Data is Context, too** | Zep is able to extract facts from JSON and unstructured text as well |
+| ⚙️ **Tailor For Your Business** | Fact Ratings and other tools allow you to fine-tune retrieval for your use case |
+| ⚡️ **Instant Memory Retrieval** | Retrieve relevant facts in under 100ms |
+| 🔐 **Compliance & Security** | User Privacy Management, SOC 2 Type II certification, and other controls |
+| 🖼️ **Framework Agnostic & Future-Proof** | Use with AutoGen or any other framework, current or future |
+
+![Zep Community Edition Walkthrough](img/ecosystem-zep-ce-walkthrough.png)
+
+![User Chat Session and Facts](img/ecosystem-zep-session.gif)
+
+![Implementing Fact Ratings](img/ecosystem-zep-fact-ratings.png)
+
+## How Zep works
+
+1. Add chat messages or data artifacts to Zep during each user interaction or agent event.
+2. Zep intelligently integrates new information into the user's (or group's) Knowledge Graph, updating existing context as needed.
+3. Retrieve relevant facts from Zep for subsequent interactions or events.
+
+Zep's temporal Knowledge Graph maintains contextual information about facts, enabling reasoning about state changes and providing data provenance insights. Each fact includes `valid_at` and `invalid_at` dates, allowing agents to track changes in user preferences, traits, or environment.
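+
+As a minimal sketch of that loop with the `zep-cloud` Python SDK (the session ID and message content are illustrative; verify the exact method signatures against the current SDK release):
+
+```python
+import asyncio
+import os
+
+from zep_cloud.client import AsyncZep
+from zep_cloud.types import Message
+
+client = AsyncZep(api_key=os.environ.get("ZEP_API_KEY"))
+
+
+async def chat_turn(session_id: str, user_text: str) -> None:
+    # 1. Persist the latest chat message to Zep.
+    await client.memory.add(
+        session_id=session_id,
+        messages=[Message(role_type="user", content=user_text)],
+    )
+    # 2. Zep integrates it into the knowledge graph asynchronously.
+    # 3. Recall facts relevant to this session for the next agent turn.
+    memory = await client.memory.get(session_id=session_id)
+    print(memory.relevant_facts)
+
+
+asyncio.run(chat_turn("session-123", "I've moved from Paris to Berlin."))
+```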
+
+## Zep is fast
+
+Retrieving facts is simple and very fast. Unlike other memory solutions, Zep does not use agents to ensure facts are relevant. It precomputes facts, entity summaries, and other artifacts asynchronously. For on-premise use, retrieval speed primarily depends on your embedding service's performance.
+
+## Zep supports many types of data
+
+You can add a variety of data artifacts to Zep:
+
+- Adding chat history messages.
+- Ingestion of JSON and unstructured text.
+
+Zep supports chat session, user, and group-level graphs. Group graphs allow for capturing organizational knowledge.
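+
+For example, here is a sketch of ingesting a JSON business artifact into a user-level graph; the `graph.add` call and its fields are assumptions based on Zep's SDK documentation, so verify them against the current `zep-cloud` release:
+
+```python
+import asyncio
+import json
+import os
+
+from zep_cloud.client import AsyncZep
+
+client = AsyncZep(api_key=os.environ.get("ZEP_API_KEY"))
+
+
+async def main() -> None:
+    # Add a structured business artifact to a user's graph (illustrative IDs).
+    await client.graph.add(
+        user_id="user-123",
+        type="json",
+        data=json.dumps({"account_status": "premium", "plan_renewal": "2025-09-01"}),
+    )
+
+
+asyncio.run(main())
+```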
+
+## Getting Started
+
+### Zep Cloud
+
+1. Sign up for [Zep Cloud](https://www.getzep.com?utm_source=autogen) and create a [Project API Key](https://help.getzep.com/projects?utm_source=autogen).
+
+2. Install one of the [Zep Python, TypeScript or Go SDKs](https://help.getzep.com/sdks?utm_source=autogen). Python instructions shown below.
+
+```shell
+pip install zep-cloud
+```
+
+3. Initialize a client
+
+```python
+import os
+from zep_cloud.client import AsyncZep
+
+API_KEY = os.environ.get('ZEP_API_KEY')
+client = AsyncZep(
+ api_key=API_KEY,
+)
+```
+
+4. Review the Zep and Autogen [notebook example](/docs/notebooks/agent_memory_using_zep/) for agent-building best practices.
+
+### Zep Community Edition
+
+Follow the [Getting Started guide](https://help.getzep.com/ce/quickstart?utm_source=autogen) or visit the [GitHub Repo](https://github.com/getzep/zep?utm_source=autogen).
+
+## Autogen + Zep examples
+
+- [Autogen Agents with Zep Memory Notebook](/docs/notebooks/agent_memory_using_zep/)
+
+## Extra links
+
+- [📙 Documentation](https://help.getzep.com/?utm_source=autogen)
+- [🐦 Twitter / X](https://x.com/zep_ai/)
+- [📢 Discord](https://discord.com/invite/W8Kw6bsgXQ)
diff --git a/website/docs/ecosystem/img/ecosystem-zep-ce-walkthrough.png b/website/docs/ecosystem/img/ecosystem-zep-ce-walkthrough.png
new file mode 100644
index 00000000000..b6937a04398
--- /dev/null
+++ b/website/docs/ecosystem/img/ecosystem-zep-ce-walkthrough.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0829b29a48ca05e2694aca00446ef5768c1b8edec56ce5035527f25f9ee4c81
+size 421633
diff --git a/website/docs/ecosystem/img/ecosystem-zep-fact-ratings.png b/website/docs/ecosystem/img/ecosystem-zep-fact-ratings.png
new file mode 100644
index 00000000000..a4b12f8a96f
--- /dev/null
+++ b/website/docs/ecosystem/img/ecosystem-zep-fact-ratings.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:179241bd4fa3ed89d721deeb1810a31b9838e7f54582d521bd91f29cbae044f2
+size 233905
diff --git a/website/docs/ecosystem/img/ecosystem-zep-session.gif b/website/docs/ecosystem/img/ecosystem-zep-session.gif
new file mode 100644
index 00000000000..5ff9eb5dbfd
Binary files /dev/null and b/website/docs/ecosystem/img/ecosystem-zep-session.gif differ
diff --git a/website/docs/topics/code-execution/kubernetes-pod-commandline-code-executor.ipynb b/website/docs/topics/code-execution/kubernetes-pod-commandline-code-executor.ipynb
new file mode 100644
index 00000000000..2cad17e0deb
--- /dev/null
+++ b/website/docs/topics/code-execution/kubernetes-pod-commandline-code-executor.ipynb
@@ -0,0 +1,773 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Kubernetes Pod Commandline Code Executor"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `PodCommandLineCodeExecutor` in the `autogen.coding.kubernetes` module is designed to execute code blocks using a pod in Kubernetes.\n",
+ "It functions similarly to the `DockerCommandLineCodeExecutor`, but specifically creates container within Kubernetes environments.\n",
+ "\n",
+ "There are two condition to use PodCommandLineCodeExecutor.\n",
+ "- Access to a Kubernetes cluster\n",
+ "- installation `autogen` with the extra requirements `'pyautogen[kubernetes]'`\n",
+ "\n",
+ "For local development and testing, this document uses a Minikube cluster.\n",
+ "\n",
+ "Minikube is a tool that allows you to run a single-node Kubernetes cluster on you local machine. \n",
+ "You can refer to the link below for installation and setup of Minikube.\n",
+ "\n",
+ "🔗 https://minikube.sigs.k8s.io/docs/start/"
+ ]
+ },
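+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If Minikube is already installed, a local cluster can typically be started with the command below (a sketch; see the Minikube docs linked above for platform-specific options):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "# Start a local single-node Kubernetes cluster for testing.\n",
+ "minikube start"
+ ]
+ },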
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Access kubernetes cluster"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "There are four options PodCommandLineCodeExecutor to access kubernetes API server.\n",
+ "- default kubeconfig file path: `~/.kube/config`\n",
+ "- Provide a custom kubeconfig file path using the `kube_config_file` argument of `PodCommandLineCodeExecutor`.\n",
+ "- Set the kubeconfig file path using the `KUBECONFIG` environment variable.\n",
+ "- Provide token from Kubernetes ServiceAccount with sufficient permissions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Generally, if kubeconfig file is located in `~/.kube/config`, there's no need to provide kubeconfig file path on parameter or environment variables.\n",
+ "\n",
+ "The tutorial of providing ServiceAccount Token is in the last section"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "In order to use kubernetes Pod based code executor, you need to install Kubernetes Python SDK.\n",
+ "\n",
+ "You can do this by running the following command:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pip install 'kubernetes>=27'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Alternatively, you can install it with the extra features for Kubernetes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pip install 'autogen-agentchat[kubernetes]~=0.2'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To provide kubeconfig file path with environment variable, It can be added with `os.environ[\"KUBECONFIG\"]`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "# Set the KUBECONFIG environment variable\n",
+ "# if the kubeconfig file is not in the default location(~/.kube/config).\n",
+ "os.environ[\"KUBECONFIG\"] = \"path/to/your/kubeconfig\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from autogen.coding import CodeBlock\n",
+ "from autogen.coding.kubernetes import PodCommandLineCodeExecutor"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "exit_code=0 output='Hello, World!\\n' code_file='/workspace/tmp_code_07da107bb575cc4e02b0e1d6d99cc204.py'\n"
+ ]
+ }
+ ],
+ "source": [
+ "with PodCommandLineCodeExecutor(\n",
+ " namespace=\"default\",\n",
+ " # kube_config_file=\"kubeconfig/file/path\" # If you have another kubeconfig file, you can add it on kube_config_file argument\n",
+ ") as executor:\n",
+ " print(\n",
+ " executor.execute_code_blocks(\n",
+ " # Example of executing a simple Python code block within a Kubernetes pod.\n",
+ " code_blocks=[\n",
+ " CodeBlock(language=\"python\", code=\"print('Hello, World!')\"),\n",
+ " ]\n",
+ " )\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Using a context manager(the `with` statement), the pod created by `PodCommandLineCodeExecutor` is automatically deleted after the tasks are completed.\n",
+ "\n",
+ "Although the pod is automatically deleted when using a context manager, you might sometimes need to delete it manually. You can do this using `stop()` method, as shown below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "executor = PodCommandLineCodeExecutor(namespace=\"default\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "NAME READY STATUS RESTARTS AGE\n",
+ "autogen-code-exec-afd217ac-f77b-4ede-8c53-1297eca5ec64 1/1 Running 0 10m\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# This command lists all pods in the default namespace. \n",
+ "# The default pod name follows the format autogen-code-exec-{uuid.uuid4()}.\n",
+ "kubectl get pod -n default"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "python:3-slim"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# This command shows container's image in the pod.\n",
+ "# The default container image is python:3-slim\n",
+ "kubectl get pod autogen-code-exec-afd217ac-f77b-4ede-8c53-1297eca5ec64 -o jsonpath={.spec.containers[0].image}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "executor.stop()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To use a different container image for code executor pod, specify the desired image tag using `image` argument.\n",
+ "\n",
+ "`PodCommandLineCodeExecutor` has a default execution policy that allows Python and shell script code blocks. You can enable other languages with `execution_policies` argument."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "exit_code=0 output='Hello, World!\\n' code_file='app/tmp_code_8c34c8586cb47943728afe1297b7a51c.js'\n"
+ ]
+ }
+ ],
+ "source": [
+ "with PodCommandLineCodeExecutor(\n",
+ " image=\"node:22-alpine\", # Specifies the runtime environments using a container image\n",
+ " namespace=\"default\",\n",
+ " work_dir=\"./app\", # Directory within the container where code block files are stored\n",
+ " timeout=10, # Timeout in seconds for pod creation and code block execution (default is 60 seconds)\n",
+ " execution_policies={\n",
+ " \"javascript\": True\n",
+ " }, # Enable execution of Javascript code blocks by updating execution policies\n",
+ ") as executor:\n",
+ " print(\n",
+ " executor.execute_code_blocks(\n",
+ " code_blocks=[\n",
+ " CodeBlock(language=\"javascript\", code=\"console.log('Hello, World!')\"),\n",
+ " ]\n",
+ " )\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you want to apply custom settings for executor pod, such as annotations, environment variables, commands, volumes etc., \n",
+ "you can provide a custom pod specification using `kubernetes.client.V1Pod` format.\n",
+ "\n",
+ "The `container_name` argument should also be provided because `PodCommandLineCodeExecutor` does not automatically recognize the container where code blocks will be executed."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from kubernetes import client\n",
+ "\n",
+ "pod = client.V1Pod(\n",
+ " metadata=client.V1ObjectMeta(name=\"abcd\", namespace=\"default\", annotations={\"sidecar.istio.io/inject\": \"false\"}),\n",
+ " spec=client.V1PodSpec(\n",
+ " restart_policy=\"Never\",\n",
+ " containers=[\n",
+ " client.V1Container(\n",
+ " args=[\"-c\", \"while true;do sleep 5; done\"],\n",
+ " command=[\"/bin/sh\"],\n",
+ " name=\"abcd\", # container name where code blocks will be executed should be provided using `container_name` argument\n",
+ " image=\"python:3.11-slim\",\n",
+ " env=[\n",
+ " client.V1EnvVar(name=\"TEST\", value=\"TEST\"),\n",
+ " client.V1EnvVar(\n",
+ " name=\"POD_NAME\",\n",
+ " value_from=client.V1EnvVarSource(\n",
+ " field_ref=client.V1ObjectFieldSelector(field_path=\"metadata.name\")\n",
+ " ),\n",
+ " ),\n",
+ " ],\n",
+ " )\n",
+ " ],\n",
+ " ),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "exit_code=0 output='Hello, World!\\n' code_file='/autogen/tmp_code_07da107bb575cc4e02b0e1d6d99cc204.py'\n",
+ "exit_code=0 output='TEST abcd\\n' code_file='/autogen/tmp_code_202399627ea7fb8d8e816f4910b7f87b.sh'\n"
+ ]
+ }
+ ],
+ "source": [
+ "with PodCommandLineCodeExecutor(\n",
+ " pod_spec=pod, # custom executor pod spec\n",
+ " container_name=\"abcd\", # To use custom executor pod spec, container_name where code block will be executed should be specified\n",
+ " work_dir=\"/autogen\",\n",
+ " timeout=60,\n",
+ ") as executor:\n",
+ " print(\n",
+ " executor.execute_code_blocks(\n",
+ " code_blocks=[\n",
+ " CodeBlock(language=\"python\", code=\"print('Hello, World!')\"),\n",
+ " ]\n",
+ " )\n",
+ " )\n",
+ " print(\n",
+ " executor.execute_code_blocks(\n",
+ " code_blocks=[\n",
+ " CodeBlock(\n",
+ " code=\"echo $TEST $POD_NAME\", language=\"bash\"\n",
+ " ), # echo environment variables specified in pod_spec\n",
+ " ]\n",
+ " )\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Integrates with AutoGen Agents"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`PodCommandLineCodeExecutor` can be integrated with Agents."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from autogen import config_list_from_json\n",
+ "\n",
+ "config_list = config_list_from_json(\n",
+ " env_or_file=\"OAI_CONFIG_LIST\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
+ "\n",
+ "Write Python code to calculate the moves of disk on tower of hanoi with 3 disks\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mcode_writer\u001b[0m (to code_executor_agent):\n",
+ "\n",
+ "The problem of the Tower of Hanoi with 3 disks involves moving the disks from one peg to another, following these rules:\n",
+ "1. Only one disk can be moved at a time.\n",
+ "2. Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack or on an empty peg.\n",
+ "3. No disk may be placed on top of a smaller disk.\n",
+ "\n",
+ "In the solution, I will use a recursive function to calculate the moves and print them out. Here's the Python code to accomplish this:\n",
+ "\n",
+ "```python\n",
+ "def tower_of_hanoi(n, from_rod, to_rod, aux_rod):\n",
+ " if n == 1:\n",
+ " print(f\"Move disk 1 from rod {from_rod} to rod {to_rod}\")\n",
+ " return\n",
+ " tower_of_hanoi(n-1, from_rod, aux_rod, to_rod)\n",
+ " print(f\"Move disk {n} from rod {from_rod} to rod {to_rod}\")\n",
+ " tower_of_hanoi(n-1, aux_rod, to_rod, from_rod)\n",
+ "\n",
+ "n = 3 # Number of disks\n",
+ "tower_of_hanoi(n, 'A', 'C', 'B') # A, B and C are names of the rods\n",
+ "```\n",
+ "\n",
+ "This script defines a function `tower_of_hanoi` that will print out each move necessary to solve the Tower of Hanoi problem with the specified number of disks `n`. This specific setup will solve for 3 disks moving from rod 'A' to rod 'C' with the help of rod 'B'.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK (inferred language is python)...\u001b[0m\n",
+ "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
+ "\n",
+ "exitcode: 0 (execution succeeded)\n",
+ "Code output: Move disk 1 from rod A to rod C\n",
+ "Move disk 2 from rod A to rod B\n",
+ "Move disk 1 from rod C to rod B\n",
+ "Move disk 3 from rod A to rod C\n",
+ "Move disk 1 from rod B to rod A\n",
+ "Move disk 2 from rod B to rod C\n",
+ "Move disk 1 from rod A to rod C\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mcode_writer\u001b[0m (to code_executor_agent):\n",
+ "\n",
+ "The execution of the provided code successfully calculated and printed the moves for solving the Tower of Hanoi with 3 disks. Here are the steps it performed:\n",
+ "\n",
+ "1. Move disk 1 from rod A to rod C.\n",
+ "2. Move disk 2 from rod A to rod B.\n",
+ "3. Move disk 1 from rod C to rod B.\n",
+ "4. Move disk 3 from rod A to rod C.\n",
+ "5. Move disk 1 from rod B to rod A.\n",
+ "6. Move disk 2 from rod B to rod C.\n",
+ "7. Move disk 1 from rod A to rod C.\n",
+ "\n",
+ "This sequence effectively transfers all disks from rod A to rod C using rod B as an auxiliary, following the rules of the Tower of Hanoi puzzle. If you have any more tasks or need further explanation, feel free to ask!\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mcode_executor_agent\u001b[0m (to code_writer):\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "from autogen import ConversableAgent\n",
+ "\n",
+ "# The code writer agent's system message is to instruct the LLM on how to\n",
+ "# use the code executor with python or shell script code\n",
+ "code_writer_system_message = \"\"\"\n",
+ "You have been given coding capability to solve tasks using Python code.\n",
+ "In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n",
+ " 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n",
+ " 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\n",
+ "Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\n",
+ "When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\n",
+ "If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\n",
+ "\"\"\"\n",
+ "with PodCommandLineCodeExecutor(namespace=\"default\") as executor:\n",
+ "\n",
+ " code_executor_agent = ConversableAgent(\n",
+ " name=\"code_executor_agent\",\n",
+ " llm_config=False,\n",
+ " code_execution_config={\n",
+ " \"executor\": executor,\n",
+ " },\n",
+ " human_input_mode=\"NEVER\",\n",
+ " )\n",
+ "\n",
+ " code_writer_agent = ConversableAgent(\n",
+ " \"code_writer\",\n",
+ " system_message=code_writer_system_message,\n",
+ " llm_config={\"config_list\": config_list},\n",
+ " code_execution_config=False, # Turn off code execution for this agent.\n",
+ " max_consecutive_auto_reply=2,\n",
+ " human_input_mode=\"NEVER\",\n",
+ " )\n",
+ "\n",
+ " chat_result = code_executor_agent.initiate_chat(\n",
+ " code_writer_agent, message=\"Write Python code to calculate the moves of disk on tower of hanoi with 10 disks\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "93802984-3207-430b-a205-82f0a77df2b2",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ChatResult(chat_id=None,\n",
+ " chat_history=[{'content': 'Write Python code to calculate the moves '\n",
+ " 'of disk on tower of hanoi with 3 disks',\n",
+ " 'name': 'code_executor_agent',\n",
+ " 'role': 'assistant'},\n",
+ " {'content': 'The problem of the Tower of Hanoi with 3 '\n",
+ " 'disks involves moving the disks from one '\n",
+ " 'peg to another, following these rules:\\n'\n",
+ " '1. Only one disk can be moved at a '\n",
+ " 'time.\\n'\n",
+ " '2. Each move consists of taking the '\n",
+ " 'upper disk from one of the stacks and '\n",
+ " 'placing it on top of another stack or on '\n",
+ " 'an empty peg.\\n'\n",
+ " '3. No disk may be placed on top of a '\n",
+ " 'smaller disk.\\n'\n",
+ " '\\n'\n",
+ " 'In the solution, I will use a recursive '\n",
+ " 'function to calculate the moves and '\n",
+ " \"print them out. Here's the Python code \"\n",
+ " 'to accomplish this:\\n'\n",
+ " '\\n'\n",
+ " '```python\\n'\n",
+ " 'def tower_of_hanoi(n, from_rod, to_rod, '\n",
+ " 'aux_rod):\\n'\n",
+ " ' if n == 1:\\n'\n",
+ " ' print(f\"Move disk 1 from rod '\n",
+ " '{from_rod} to rod {to_rod}\")\\n'\n",
+ " ' return\\n'\n",
+ " ' tower_of_hanoi(n-1, from_rod, '\n",
+ " 'aux_rod, to_rod)\\n'\n",
+ " ' print(f\"Move disk {n} from rod '\n",
+ " '{from_rod} to rod {to_rod}\")\\n'\n",
+ " ' tower_of_hanoi(n-1, aux_rod, to_rod, '\n",
+ " 'from_rod)\\n'\n",
+ " '\\n'\n",
+ " 'n = 3 # Number of disks\\n'\n",
+ " \"tower_of_hanoi(n, 'A', 'C', 'B') # A, B \"\n",
+ " 'and C are names of the rods\\n'\n",
+ " '```\\n'\n",
+ " '\\n'\n",
+ " 'This script defines a function '\n",
+ " '`tower_of_hanoi` that will print out '\n",
+ " 'each move necessary to solve the Tower '\n",
+ " 'of Hanoi problem with the specified '\n",
+ " 'number of disks `n`. This specific setup '\n",
+ " 'will solve for 3 disks moving from rod '\n",
+ " \"'A' to rod 'C' with the help of rod 'B'.\",\n",
+ " 'name': 'code_writer',\n",
+ " 'role': 'user'},\n",
+ " {'content': 'exitcode: 0 (execution succeeded)\\n'\n",
+ " 'Code output: Move disk 1 from rod A to '\n",
+ " 'rod C\\n'\n",
+ " 'Move disk 2 from rod A to rod B\\n'\n",
+ " 'Move disk 1 from rod C to rod B\\n'\n",
+ " 'Move disk 3 from rod A to rod C\\n'\n",
+ " 'Move disk 1 from rod B to rod A\\n'\n",
+ " 'Move disk 2 from rod B to rod C\\n'\n",
+ " 'Move disk 1 from rod A to rod C\\n',\n",
+ " 'name': 'code_executor_agent',\n",
+ " 'role': 'assistant'},\n",
+ " {'content': 'The execution of the provided code '\n",
+ " 'successfully calculated and printed the '\n",
+ " 'moves for solving the Tower of Hanoi '\n",
+ " 'with 3 disks. Here are the steps it '\n",
+ " 'performed:\\n'\n",
+ " '\\n'\n",
+ " '1. Move disk 1 from rod A to rod C.\\n'\n",
+ " '2. Move disk 2 from rod A to rod B.\\n'\n",
+ " '3. Move disk 1 from rod C to rod B.\\n'\n",
+ " '4. Move disk 3 from rod A to rod C.\\n'\n",
+ " '5. Move disk 1 from rod B to rod A.\\n'\n",
+ " '6. Move disk 2 from rod B to rod C.\\n'\n",
+ " '7. Move disk 1 from rod A to rod C.\\n'\n",
+ " '\\n'\n",
+ " 'This sequence effectively transfers all '\n",
+ " 'disks from rod A to rod C using rod B as '\n",
+ " 'an auxiliary, following the rules of the '\n",
+ " 'Tower of Hanoi puzzle. If you have any '\n",
+ " 'more tasks or need further explanation, '\n",
+ " 'feel free to ask!',\n",
+ " 'name': 'code_writer',\n",
+ " 'role': 'user'},\n",
+ " {'content': '',\n",
+ " 'name': 'code_executor_agent',\n",
+ " 'role': 'assistant'}],\n",
+ " summary='',\n",
+ " cost={'usage_excluding_cached_inference': {'total_cost': 0},\n",
+ " 'usage_including_cached_inference': {'gpt-4-turbo-2024-04-09': {'completion_tokens': 499,\n",
+ " 'cost': 0.0269,\n",
+ " 'prompt_tokens': 1193,\n",
+ " 'total_tokens': 1692},\n",
+ " 'total_cost': 0.0269}},\n",
+ " human_input=[])\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pprint\n",
+ "\n",
+ "pprint.pprint(chat_result)"
+ ]
+ },
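+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Individual fields can also be read directly. The cell below is a small sketch that relies only on the `ChatResult` structure shown in the output above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Read selected fields of the ChatResult printed above.\n",
+ "final_turn = chat_result.chat_history[-1]\n",
+ "print(\"last speaker:\", final_turn[\"name\"])\n",
+ "\n",
+ "# Aggregated cost, including cached inference (keys as shown in the pprint output).\n",
+ "usage = chat_result.cost[\"usage_including_cached_inference\"]\n",
+ "print(\"total cost (USD):\", usage[\"total_cost\"])"
+ ]
+ },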
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Use ServiceAccount token"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If a `PodCommandLineCodeExecutor` instance runs inside of Kubernetes Pod, it can use a token generated from a ServiceAccount to access Kubernetes API server.\n",
+ "\n",
+ "The `PodCommandLineCodeExecutor` requires the following permissions:\n",
+ "the verbs `create`, `get`, `delete` for `pods` resource, and the verb `get` for resources `pods/status`, `pods/exec`.\n",
+ "\n",
+ "You can create a ServiceAccount, ClusterRole and RoleBinding with `kubectl` as shown below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "serviceaccount/autogen-executor-sa created\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# Create ServiceAccount on default namespace\n",
+ "kubectl create sa autogen-executor-sa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "clusterrole.rbac.authorization.k8s.io/autogen-executor-role created\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# Create ClusterRole that has sufficient permissions\n",
+ "kubectl create clusterrole autogen-executor-role \\\n",
+ " --verb=get,create,delete --resource=pods,pods/status,pods/exec"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "rolebinding.rbac.authorization.k8s.io/autogen-executor-rolebinding created\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# Create RoleBinding that binds ClusterRole and ServiceAccount\n",
+ "kubectl create rolebinding autogen-executor-rolebinding \\\n",
+ " --clusterrole autogen-executor-role --serviceaccount default:autogen-executor-sa"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A pod with a previously created ServiceAccount can be launched using the following command."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "pod/autogen-executor created\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "# create pod with serviceaccount\n",
+ "kubectl run autogen-executor --image python:3 \\\n",
+ " --overrides='{\"spec\":{\"serviceAccount\": \"autogen-executor-sa\"}}' \\\n",
+ " -- bash -c 'pip install pyautogen[kubernetes] && sleep inifinity'"
+ ]
+ },
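+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The container installs `pyautogen[kubernetes]` at startup, so the package is not importable until `pip install` completes; you can follow the progress with `kubectl logs -f autogen-executor`."
+ ]
+ },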
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can execute `PodCommandLineCodeExecutor` inside the Python interpreter process from `autogen-executor` Pod.\n",
+ "\n",
+ "It creates new pod for code execution using token generated from `autogen-executor-sa` ServiceAccount."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "kubectl exec autogen-executor -it -- python"
+ ]
+ },
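+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before running the executor, you can optionally confirm from inside the interpreter that the ServiceAccount token carries the permissions listed earlier. The cell below is a minimal sketch using `SelfSubjectAccessReview` from the official `kubernetes` Python client (pulled in by `pyautogen[kubernetes]`); it assumes the in-cluster config and the `default` namespace used above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from kubernetes import client, config\n",
+ "\n",
+ "# Authenticate with the ServiceAccount token mounted into this Pod.\n",
+ "config.load_incluster_config()\n",
+ "\n",
+ "auth = client.AuthorizationV1Api()\n",
+ "# (verb, resource, subresource) tuples matching the permissions listed earlier.\n",
+ "checks = [\n",
+ "    (\"create\", \"pods\", None),\n",
+ "    (\"get\", \"pods\", None),\n",
+ "    (\"delete\", \"pods\", None),\n",
+ "    (\"get\", \"pods\", \"status\"),\n",
+ "    (\"get\", \"pods\", \"exec\"),\n",
+ "]\n",
+ "for verb, resource, subresource in checks:\n",
+ "    review = client.V1SelfSubjectAccessReview(\n",
+ "        spec=client.V1SelfSubjectAccessReviewSpec(\n",
+ "            resource_attributes=client.V1ResourceAttributes(\n",
+ "                namespace=\"default\", verb=verb, resource=resource, subresource=subresource\n",
+ "            )\n",
+ "        )\n",
+ "    )\n",
+ "    allowed = auth.create_self_subject_access_review(review).status.allowed\n",
+ "    print(verb, resource, subresource or \"\", \"->\", allowed)"
+ ]
+ },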
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "kube_config_path not provided and default location (~/.kube/config) does not exist. Using inCluster Config. This might not work.\n",
+ "exit_code=0 output='Hello, World!\\n' code_file='/workspace/tmp_code_07da107bb575cc4e02b0e1d6d99cc204.py'"
+ ]
+ }
+ ],
+ "source": [
+ "from autogen.coding import CodeBlock\n",
+ "from autogen.coding.kubernetes import PodCommandLineCodeExecutor\n",
+ "\n",
+ "# PodCommandLineCodeExecutor uses token generated from ServiceAccount by kubernetes incluster config\n",
+ "with PodCommandLineCodeExecutor() as executor:\n",
+ " print(\n",
+ " executor.execute_code_blocks(\n",
+ " code_blocks=[\n",
+ " CodeBlock(language=\"python\", code=\"print('Hello, World!')\"),\n",
+ " ]\n",
+ " )\n",
+ " )"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "autogen",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/website/docs/topics/retrieval_augmentation.md b/website/docs/topics/retrieval_augmentation.md
index 1ac32e85fef..3fa9d5754fd 100644
--- a/website/docs/topics/retrieval_augmentation.md
+++ b/website/docs/topics/retrieval_augmentation.md
@@ -127,6 +127,7 @@ For more detailed examples and notebooks showcasing the usage of retrieval augme
- Automated Code Generation and Question Answering with [PGVector](https://github.com/pgvector/pgvector) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_pgvector.ipynb)
- Automated Code Generation and Question Answering with [Qdrant](https://qdrant.tech/) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_qdrant.ipynb)
- Automated Code Generation and Question Answering with [MongoDB Atlas](https://www.mongodb.com/) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_mongodb.ipynb)
+- Automated Code Generation and Question Answering with [Couchbase](https://www.couchbase.com/) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_RetrieveChat_couchbase.ipynb)
- Chat with OpenAI Assistant with Retrieval Augmentation - [View Notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_oai_assistant_retrieval.ipynb)
- **RAG**: Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat_RAG)
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 203c72c0222..99d423e0370 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -1,24 +1,44 @@
-import React from 'react';
-import clsx from 'clsx';
-import Layout from '@theme/Layout';
-import Link from '@docusaurus/Link';
-import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
-import styles from './index.module.css';
-import HomepageFeatures from '../components/HomepageFeatures';
+import React from "react";
+import clsx from "clsx";
+import Layout from "@theme/Layout";
+import Link from "@docusaurus/Link";
+import useDocusaurusContext from "@docusaurus/useDocusaurusContext";
+import styles from "./index.module.css";
+import HomepageFeatures from "../components/HomepageFeatures";
function HomepageHeader() {
- const {siteConfig} = useDocusaurusContext();
+ const { siteConfig } = useDocusaurusContext();
return (
-
+
{siteConfig.title}
{siteConfig.tagline}
-
- Getting Started - 3min ⏱️
-
+
+
+ Get Started
+
+
+ Current stable version of AutoGen (autogen-agentchat~=0.2)
+
+
+
+
+ Preview v0.4
+
+
+ A new event-driven, asynchronous architecture for AutoGen
+
+
@@ -26,11 +46,12 @@ function HomepageHeader() {
}
export default function Home() {
- const {siteConfig} = useDocusaurusContext();
+ const { siteConfig } = useDocusaurusContext();
return (
+ description="Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework"
+ >
diff --git a/website/src/pages/index.module.css b/website/src/pages/index.module.css
index 5c0d4f6e20c..883ab870aae 100644
--- a/website/src/pages/index.module.css
+++ b/website/src/pages/index.module.css
@@ -22,4 +22,25 @@
display: flex;
align-items: center;
justify-content: center;
+ gap: 10px;
+}
+
+.buttonWrapper {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ width: 200px;
+}
+
+.buttonLink {
+ width: 100%;
+}
+
+.buttonTagline {
+ font-size: 0.8rem;
+ margin-top: 5px;
+ text-align: center;
+ height: 100px;
+ overflow-y: auto;
+ padding: 5px;
}