Skip to content

Commit

Permalink
Merge branch 'microsoft:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
gunnarku authored Feb 12, 2024
2 parents ec31add + d01063d commit 49fe86f
Show file tree
Hide file tree
Showing 6 changed files with 18 additions and 51 deletions.
8 changes: 4 additions & 4 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1108,9 +1108,9 @@ def generate_oai_reply(
extracted_response = self._generate_oai_reply_from_client(
client, self._oai_system_message + messages, self.client_cache
)
return True, extracted_response
return (False, None) if extracted_response is None else (True, extracted_response)

def _generate_oai_reply_from_client(self, llm_client, messages, cache):
def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
# unroll tool_responses
all_messages = []
for message in messages:
Expand All @@ -1132,8 +1132,8 @@ def _generate_oai_reply_from_client(self, llm_client, messages, cache):
extracted_response = llm_client.extract_text_or_completion_object(response)[0]

if extracted_response is None:
warnings.warn("Extracted_response is None.", UserWarning)
return False, None
warnings.warn("Extracted_response from {response} is None.", UserWarning)
return None
# ensure function and tool calls will be accepted when sent back to the LLM
if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
extracted_response = model_dump(extracted_response)
Expand Down
2 changes: 1 addition & 1 deletion autogen/version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.2.13"
__version__ = "0.2.14"
2 changes: 0 additions & 2 deletions samples/apps/autogen-studio/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ Project Structure:

There are two ways to install AutoGen Studio - from PyPI or from source. We **recommend installing from PyPI** unless you plan to modify the source code.

There are two ways to install AutoGen Studio - from PyPI or from source. We **recommend installing from PyPI** unless you plan to modify the source code.

1. **Install from PyPi**

We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Studio:
Expand Down
17 changes: 3 additions & 14 deletions test/agentchat/test_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,18 +68,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": {
"gpt-3.5-turbo",
"gpt-35-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
"gpt",
},
},
filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
)
llm_config = {
"cache_seed": 42,
Expand Down Expand Up @@ -206,8 +195,8 @@ def generate_init_message(self, question) -> str:


if __name__ == "__main__":
# test_gpt35()
test_create_execute_script(human_input_mode="TERMINATE")
test_gpt35()
# test_create_execute_script(human_input_mode="TERMINATE")
# when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
# should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
# although the max_consecutive_auto_reply is set to 10.
Expand Down
28 changes: 4 additions & 24 deletions test/agentchat/test_cache_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import autogen
from autogen.agentchat import AssistantAgent, UserProxyAgent
from autogen.cache import Cache
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST, here

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai, skip_redis # noqa: E402
Expand Down Expand Up @@ -107,23 +108,11 @@ def test_disk_cache():


def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_reply=5, cache=None):
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__))
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": {
"gpt-3.5-turbo",
"gpt-35-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
"gpt",
},
"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
},
)
llm_config = {
Expand Down Expand Up @@ -159,7 +148,7 @@ def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_

# track how long this takes
user.initiate_chat(assistant, message=coding_task, cache=cache)
return user.chat_messages[list(user.chat_messages.keys())[-0]]
return user.chat_messages[assistant]


def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_auto_reply=5):
Expand All @@ -170,16 +159,7 @@ def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": {
"gpt-3.5-turbo",
"gpt-35-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
"gpt",
},
"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
},
)
llm_config = {
Expand Down
12 changes: 6 additions & 6 deletions test/oai/test_client_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def test_chat_tools_stream() -> None:
config_list = config_list_from_json(
env_or_file=OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
filter_dict={"tags": ["multitool"]},
)
tools = [
{
Expand All @@ -254,7 +254,6 @@ def test_chat_tools_stream() -> None:
},
},
]
print(f"{config_list=}")
client = OpenAIWrapper(config_list=config_list)
response = client.create(
# the intention is to trigger two tool invocations as a response to a single message
Expand Down Expand Up @@ -294,7 +293,8 @@ def test_completion_stream() -> None:


if __name__ == "__main__":
test_aoai_chat_completion_stream()
test_chat_completion_stream()
test_chat_functions_stream()
test_completion_stream()
# test_aoai_chat_completion_stream()
# test_chat_completion_stream()
# test_chat_functions_stream()
# test_completion_stream()
test_chat_tools_stream()

0 comments on commit 49fe86f

Please sign in to comment.