removed duplications in function messages. #1262

Open · wants to merge 1 commit into base: main
41 changes: 4 additions & 37 deletions livekit-agents/livekit/agents/pipeline/pipeline_agent.py
@@ -69,7 +69,6 @@ def __init__(self, assistant: "VoicePipelineAgent", llm_stream: LLMStream) -> None:
         self._assistant = assistant
         self._metadata = dict[str, Any]()
         self._llm_stream = llm_stream
-        self._extra_chat_messages: list[ChatMessage] = []
 
     @staticmethod
     def get_current() -> "AgentCallContext":
@@ -92,15 +91,6 @@ def get_metadata(self, key: str, default: Any = None) -> Any:
     def llm_stream(self) -> LLMStream:
         return self._llm_stream
 
-    def add_extra_chat_message(self, message: ChatMessage) -> None:
-        """Append chat message to the end of function outputs for the answer LLM call"""
-        self._extra_chat_messages.append(message)
-
-    @property
-    def extra_chat_messages(self) -> list[ChatMessage]:
-        return self._extra_chat_messages
-
 
 def _default_before_llm_cb(
     agent: VoicePipelineAgent, chat_ctx: ChatContext
 ) -> LLMStream:
@@ -446,22 +436,16 @@ async def say(
         await self._track_published_fut
 
         call_ctx = None
-        fnc_source: str | AsyncIterable[str] | None = None
         if add_to_chat_ctx:
             try:
                 call_ctx = AgentCallContext.get_current()
             except LookupError:
                 # no active call context, ignore
                 pass
-            else:
-                if isinstance(source, LLMStream):
-                    logger.warning(
-                        "LLMStream will be ignored for function call chat context"
-                    )
-                elif isinstance(source, AsyncIterable):
-                    source, fnc_source = utils.aio.itertools.tee(source, 2)  # type: ignore
-                else:
-                    fnc_source = source
+            if call_ctx is not None:
+                # Don't add to chat context if we're in a function call
+                add_to_chat_ctx = False
 
         new_handle = SpeechHandle.create_assistant_speech(
             allow_interruptions=allow_interruptions, add_to_chat_ctx=add_to_chat_ctx
@@ -474,23 +458,6 @@
         else:
             self._add_speech_for_playout(new_handle)
 
-        # add the speech to the function call context if needed
-        if call_ctx is not None and fnc_source is not None:
-            if isinstance(fnc_source, AsyncIterable):
-                text = ""
-                async for chunk in fnc_source:
-                    text += chunk
-            else:
-                text = fnc_source
-
-            call_ctx.add_extra_chat_message(
-                ChatMessage.create(text=text, role="assistant")
-            )
-            logger.debug(
-                "added speech to function call chat context",
-                extra={"text": text},
-            )
-
         return new_handle
 
     def _update_state(self, state: AgentState, delay: float = 0.0):
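
For context, a minimal sketch of what this change means for a function-calling tool. The get_weather tool, its decorator usage, and the call_ctx.agent accessor follow the livekit-agents function-calling examples, but the snippet itself is an illustration, not code from this PR: a filler phrase spoken via say() while a tool is running is still played, yet no longer written into the chat context, so it cannot show up twice in the follow-up LLM call.

    from livekit.agents import llm
    from livekit.agents.pipeline import AgentCallContext


    class AssistantFnc(llm.FunctionContext):
        @llm.ai_callable(description="Get the current weather for a location")
        async def get_weather(self, location: str) -> str:
            call_ctx = AgentCallContext.get_current()

            # Played to the user; with this PR, add_to_chat_ctx is forced
            # to False while a function call is active, so this filler
            # text is not appended to the chat context.
            await call_ctx.agent.say("Let me look that up for you.")

            # Only this return value reaches the follow-up LLM call, as
            # the function-output message.
            return f"The weather in {location} is sunny."
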
Expand Down Expand Up @@ -805,6 +772,7 @@ def _commit_user_question_if_needed() -> None:
collected_text
and speech_handle.add_to_chat_ctx
and (not user_question or speech_handle.user_committed)
and not is_using_tools
):
if speech_handle.extra_tools_messages:
self._chat_ctx.messages.extend(speech_handle.extra_tools_messages)
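
Paraphrased as a standalone predicate (names borrowed from the hunk above; this helper does not exist in the codebase), the tightened commit guard reads:

    def should_commit_to_chat_ctx(
        collected_text: str,
        add_to_chat_ctx: bool,
        user_question: str,
        user_committed: bool,
        is_using_tools: bool,
    ) -> bool:
        # Interim speech is only written back to the chat context when no
        # tool call is in flight; the answer synthesized after the tools
        # finish is what gets committed instead.
        return bool(
            collected_text
            and add_to_chat_ctx
            and (not user_question or user_committed)
            and not is_using_tools  # the condition this PR adds
        )
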
Expand Down Expand Up @@ -919,7 +887,6 @@ async def _execute_function_calls() -> None:
# synthesize the tool speech with the chat ctx from llm_stream
chat_ctx = call_ctx.chat_ctx.copy()
chat_ctx.messages.extend(extra_tools_messages)
chat_ctx.messages.extend(call_ctx.extra_chat_messages)
answer_llm_stream = self._llm.chat(chat_ctx=chat_ctx, fnc_ctx=self.fnc_ctx)

synthesis_handle = self._synthesize_agent_speech(
Expand Down
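
To see the effect on the answer call, a small sketch using ChatContext and ChatMessage from livekit.agents.llm, the same types this file uses (the message contents are invented): the follow-up LLM call now receives the prior history plus the tool messages, with no extra assistant message injected by say().

    from livekit.agents.llm import ChatContext, ChatMessage

    # Prior conversation history (contents invented for illustration).
    chat_ctx = ChatContext()
    chat_ctx.messages.append(
        ChatMessage.create(text="What's the weather in Tokyo?", role="user")
    )

    # Stand-in for the function call/output messages the agent appends
    # after the tools run (extra_tools_messages in the diff above).
    extra_tools_messages = [
        ChatMessage.create(text="The weather in Tokyo is sunny.", role="tool"),
    ]

    answer_ctx = chat_ctx.copy()
    answer_ctx.messages.extend(extra_tools_messages)
    # Before this PR, call_ctx.extra_chat_messages (holding any say() text)
    # was extended here as well; dropping it removes the duplicated message.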