Skip to content

Commit

Permalink
Validate llm_config passed to ConversableAgent
Browse files Browse the repository at this point in the history
Based on microsoft#1522, this commit implements the additional validation checks in
`ConversableAgent`.

Add the following validation and `raise ValueError` if:

 - The `llm_config` is `None` (validated in `ConversableAgent`).
 - The `llm_config` has no `model` specified and `config_list` is empty
   (validated in `OpenAIWrapper`).
 - The `config_list` has at least one entry, but not all of the entries have
   the `model` specified (validated in `OpenAIWrapper`).

The rest of the changes are code churn to adjust or add the test cases.
  • Loading branch information
gunnarku committed Feb 13, 2024
1 parent ca48838 commit 1dc4118
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 22 deletions.
23 changes: 5 additions & 18 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,31 +136,18 @@ def __init__(
else (lambda x: content_str(x.get("content")) == "TERMINATE")
)

if llm_config is None:
raise ValueError("Please specify the value for 'llm_config'.")

if isinstance(llm_config, dict):
config_list = None
if "config_list" in llm_config:
config_list = llm_config["config_list"]
if config_list is None or len(config_list) == 0:
raise ValueError("Please specify at least one configuration in 'llm_config'.")

# We know that there's at least one entry in the configuration.
# Verify that model is specified as well.
model = None
if "model" in llm_config["config_list"][0]:
model = llm_config["config_list"][0]["model"]
if model is None or len(model) == 0:
raise ValueError("Please specify a value for the 'model' in 'llm_config'.")

if llm_config is False:
self.llm_config = False
self.client = None
else:
self.llm_config = self.DEFAULT_CONFIG.copy()
if isinstance(llm_config, dict):
self.llm_config.update(llm_config)
# We still have a default `llm_config` because the user didn't
# specify anything. This won't work, so raise an error to avoid
# an obscure message from the OpenAI service.
if self.llm_config == self.DEFAULT_CONFIG:
raise ValueError("Please specify the value for 'llm_config'.")
self.client = OpenAIWrapper(**self.llm_config)

# Initialize standalone client cache object.
Expand Down
14 changes: 13 additions & 1 deletion autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -345,15 +345,27 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
and additional kwargs.
"""
openai_config, extra_kwargs = self._separate_openai_config(base_config)
# This *may* work if the `llm_config` has specified the `model` attribute,
# so just warn here.
if type(config_list) is list and len(config_list) == 0:
logger.warning("openai client was provided with an empty config_list, which may not be intended.")
logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.")
# If the `llm_config` has no `model` then the call will fail. Abort now.
if "model" not in extra_kwargs:
raise ValueError("Please specify a value for the 'model' in 'llm_config'.")

self._clients: List[ModelClient] = []
self._config_list: List[Dict[str, Any]] = []

if config_list:
config_list = [config.copy() for config in config_list] # make a copy before modifying
for config in config_list:
# We require that each element of `config_list` has a non-empty value
# for `model` specified.
model = None
if "model" in config:
model = config["model"]
if model is None or len(model) == 0:
raise ValueError("Please specify a value for the 'model' in 'config_list'.")
self._register_default_client(config, openai_config) # could modify the config
self._config_list.append(
{**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
Expand Down
4 changes: 3 additions & 1 deletion test/agentchat/contrib/test_web_surfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,9 @@ def test_web_surfer() -> None:
mp.setenv("OPENAI_API_KEY", "mock")
page_size = 4096
web_surfer = WebSurferAgent(
"web_surfer", llm_config={"config_list": []}, browser_config={"viewport_size": page_size}
"web_surfer",
llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]},
browser_config={"viewport_size": page_size},
)

# Sneak a peak at the function map, allowing us to call the functions for testing here
Expand Down
4 changes: 2 additions & 2 deletions test/agentchat/test_conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -786,15 +786,15 @@ def test_register_for_llm_without_configuration():
ConversableAgent(name="agent", llm_config={"config_list": []})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify at least one configuration in 'llm_config'."
assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."


def test_register_for_llm_without_model_name():
try:
ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."
assert e.args[0] == "Please specify a value for the 'model' in 'config_list'."


def test_register_for_execution():
Expand Down

0 comments on commit 1dc4118

Please sign in to comment.