Skip to content

Commit

Permalink
[feature] as_studio (#44)
Browse files Browse the repository at this point in the history
  • Loading branch information
qbc2016 authored Mar 7, 2024
1 parent 2a53383 commit 714a758
Show file tree
Hide file tree
Showing 11 changed files with 747 additions and 131 deletions.
68 changes: 38 additions & 30 deletions examples/conversation/conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,36 +5,44 @@
from agentscope.agents.user_agent import UserAgent
from agentscope.pipelines.functional import sequentialpipeline

def main() -> None:
    """Run a simple user <-> assistant conversation demo.

    Initializes AgentScope with two example model configs (an OpenAI chat
    model and a generic post-API backend), builds one dialog agent and one
    user agent, and loops them in a sequential pipeline until the user
    types "exit".
    """
    agentscope.init(
        model_configs=[
            {
                "model_type": "openai",
                "config_name": "gpt-3.5-turbo",
                "model_name": "gpt-3.5-turbo",
                "api_key": "xxx",  # Load from env if not provided
                "organization": "xxx",  # Load from env if not provided
                "generate_args": {
                    "temperature": 0.5,
                },
            },
            {
                "model_type": "post_api_chat",
                "config_name": "my_post_api",
                "api_url": "https://xxx",
                "headers": {},
            },
        ],
    )

    # Init two agents
    dialog_agent = DialogAgent(
        name="Assistant",
        sys_prompt="You're a helpful assistant.",
        model_config_name="gpt-3.5-turbo",  # replace by your model config name
    )
    user_agent = UserAgent()

    # start the conversation between user and assistant
    # x carries the last message; the loop ends when the user sends "exit"
    x = None
    while x is None or x.content != "exit":
        x = sequentialpipeline([dialog_agent, user_agent], x)


if __name__ == "__main__":
    main()
201 changes: 112 additions & 89 deletions examples/werewolf/werewolf.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from functools import partial

from prompt import Prompts
from utils import (
from werewolf_utils import (
check_winning,
update_alive_players,
majority_vote,
Expand All @@ -15,99 +15,122 @@
from agentscope.pipelines.functional import sequentialpipeline
import agentscope

# pylint: disable=too-many-statements
def main() -> None:
    """Run the werewolf game.

    Loads model and agent configs, assigns roles, then alternates night
    (werewolf discussion/vote, witch, seer) and day (discussion, vote)
    phases until one side wins or MAX_GAME_ROUND is reached.
    """
    # default settings
    HostMsg = partial(Msg, name="Moderator", echo=True)
    healing, poison = True, True  # one-shot witch potions
    MAX_WEREWOLF_DISCUSSION_ROUND = 3
    MAX_GAME_ROUND = 6
    # read model and agent configs, and initialize agents automatically
    survivors = agentscope.init(
        model_configs="./configs/model_configs.json",
        agent_configs="./configs/agent_configs.json",
    )
    # roles[i] matches the i-th agent returned by agentscope.init
    roles = ["werewolf", "werewolf", "villager", "villager", "seer", "witch"]
    wolves, witch, seer = survivors[:2], survivors[-1], survivors[-2]

    # start the game
    for _ in range(1, MAX_GAME_ROUND + 1):
        # night phase, werewolves discuss
        hint = HostMsg(content=Prompts.to_wolves.format(n2s(wolves)))
        with msghub(wolves, announcement=hint) as hub:
            for _ in range(MAX_WEREWOLF_DISCUSSION_ROUND):
                x = sequentialpipeline(wolves)
                if x.get("agreement", False):
                    break

            # werewolves vote
            hint = HostMsg(content=Prompts.to_wolves_vote)
            votes = [
                extract_name_and_id(wolf(hint).content)[0] for wolf in wolves
            ]
            # broadcast the result to werewolves
            dead_player = [majority_vote(votes)]
            hub.broadcast(
                HostMsg(content=Prompts.to_wolves_res.format(dead_player[0])),
            )

        # witch: may resurrect tonight's victim or poison another player
        healing_used_tonight = False
        if witch in survivors:
            if healing:
                hint = HostMsg(
                    content=Prompts.to_witch_resurrect.format_map(
                        {
                            "witch_name": witch.name,
                            "dead_name": dead_player[0],
                        },
                    ),
                )
                if witch(hint).get("resurrect", False):
                    healing_used_tonight = True
                    dead_player.pop()
                    healing = False

            # the witch may not heal and poison in the same night
            if poison and not healing_used_tonight:
                x = witch(HostMsg(content=Prompts.to_witch_poison))
                if x.get("eliminate", False):
                    dead_player.append(extract_name_and_id(x.content)[0])
                    poison = False

        # seer: checks one player's role each night
        if seer in survivors:
            hint = HostMsg(
                content=Prompts.to_seer.format(seer.name, n2s(survivors)),
            )
            x = seer(hint)

            player, idx = extract_name_and_id(x.content)
            role = "werewolf" if roles[idx] == "werewolf" else "villager"
            hint = HostMsg(content=Prompts.to_seer_result.format(player, role))
            seer.observe(hint)

        survivors, wolves = update_alive_players(
            survivors,
            wolves,
            dead_player,
        )
        if check_winning(survivors, wolves, "Moderator"):
            break

        # daytime discussion
        content = (
            Prompts.to_all_danger.format(n2s(dead_player))
            if dead_player
            else Prompts.to_all_peace
        )
        hints = [
            HostMsg(content=content),
            HostMsg(content=Prompts.to_all_discuss.format(n2s(survivors))),
        ]
        with msghub(survivors, announcement=hints) as hub:
            # discuss
            x = sequentialpipeline(survivors)

            # vote
            hint = HostMsg(content=Prompts.to_all_vote.format(n2s(survivors)))
            votes = [
                extract_name_and_id(_(hint).content)[0] for _ in survivors
            ]
            vote_res = majority_vote(votes)
            # broadcast the result to all players
            result = HostMsg(content=Prompts.to_all_res.format(vote_res))
            hub.broadcast(result)

            survivors, wolves = update_alive_players(
                survivors,
                wolves,
                vote_res,
            )

            if check_winning(survivors, wolves, "Moderator"):
                break

            hub.broadcast(HostMsg(content=Prompts.to_all_continue))


if __name__ == "__main__":
    main()
File renamed without changes.
10 changes: 9 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@

test_requires = ["pytest", "pytest-cov", "pre-commit"]

gradio_requires = ["gradio==4.19.1", "modelscope_studio==0.0.5"]

# released requires
minimal_requires = [
"loguru",
Expand All @@ -47,7 +49,7 @@
"Flask==3.0.0",
"Flask-Cors==4.0.0",
"Flask-SocketIO==5.3.6",
"dashscope",
"dashscope==1.14.1",
]

distribute_requires = minimal_requires + rpc_requires
Expand All @@ -60,6 +62,7 @@
+ service_requires
+ doc_requires
+ test_requires
+ gradio_requires
)

with open("README.md", "r", encoding="UTF-8") as fh:
Expand Down Expand Up @@ -93,4 +96,9 @@
"Operating System :: OS Independent",
],
python_requires=">=3.9",
entry_points={
"console_scripts": [
"as_studio=agentscope.web.studio.studio:run_app",
],
},
)
5 changes: 2 additions & 3 deletions src/agentscope/agents/user_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

from agentscope.agents import AgentBase
from agentscope.message import Msg
from agentscope.web.studio.utils import user_input


class UserAgent(AgentBase):
Expand Down Expand Up @@ -62,7 +63,7 @@ def reply(
# TODO: To avoid order confusion, because `input` print much quicker
# than logger.chat
time.sleep(0.5)
content = input(f"{self.name}: ")
content = user_input()

kwargs = {}
if required_keys is not None:
Expand All @@ -85,8 +86,6 @@ def reply(
**kwargs, # type: ignore[arg-type]
)

self.speak(msg)

# Add to memory
self.memory.add(msg)

Expand Down
3 changes: 2 additions & 1 deletion src/agentscope/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,9 +135,10 @@ def read_model_configs(
# check if name is unique
for cfg in format_configs:
if cfg.config_name in _MODEL_CONFIGS:
raise ValueError(
logger.warning(
f"config_name [{cfg.config_name}] already exists.",
)
continue
_MODEL_CONFIGS[cfg.config_name] = cfg

# print the loaded model configs
Expand Down
Loading

0 comments on commit 714a758

Please sign in to comment.