From 6d2af0240648f1e7842c1f0b43f66fcf71847aa1 Mon Sep 17 00:00:00 2001
From: Nick Fiacco
Date: Wed, 12 Jun 2024 16:04:38 -0400
Subject: [PATCH] Handle function calls in response

---
 demospace/livekit/openai_assistant/llm.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/demospace/livekit/openai_assistant/llm.py b/demospace/livekit/openai_assistant/llm.py
index eb2b648..eb86545 100644
--- a/demospace/livekit/openai_assistant/llm.py
+++ b/demospace/livekit/openai_assistant/llm.py
@@ -125,10 +125,22 @@ def _add_chunk_to_stream(
     async def _handle_response_stream(
         self, stream: openai.AsyncAssistantEventHandler, llm_stream: LLMStream
     ) -> None:
+        gathering_function_call = False
+        function_call = ""
         async for chunk in stream:
             self._active_run = stream.current_run
             if chunk.event == "thread.message.delta":
-                self._add_chunk_to_stream(llm_stream, chunk)
+                if "\n\n" in chunk.data.delta.content[0].text.value:
+                    if gathering_function_call:
+                        gathering_function_call = False
+                        logging.info(f"Function call: {function_call}")
+                        # await send_asset(function_call, self._room)
+                    else:
+                        gathering_function_call = True
+                elif gathering_function_call:
+                    function_call += chunk.data.delta.content[0].text.value
+                else:
+                    self._add_chunk_to_stream(llm_stream, chunk)
             elif chunk.event == "thread.run.completed":
                 self._active_run = None
                 llm_stream.push_text(None)
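
The hunk above implements a small state machine: the assistant is assumed to bracket each function call between two "\n\n" markers in its streamed text, and text between the markers is buffered rather than forwarded to the LLM stream. Below is a minimal, self-contained sketch of that same delimiter logic outside the LiveKit plumbing; the names make_delta_handler, on_text, and on_function_call are hypothetical and exist only for illustration, and the "\n\n" convention is taken from this patch, not from the OpenAI Assistants API.

import logging

logging.basicConfig(level=logging.INFO)


# Routes streamed text deltas: plain text goes to on_text, and anything
# bracketed between two "\n\n" delimiters is buffered and handed to
# on_function_call once the closing delimiter arrives. (All names here are
# hypothetical; the delimiter convention mirrors the patch above.)
def make_delta_handler(on_text, on_function_call):
    gathering = False  # True while inside a "\n\n ... \n\n" span
    buffer = ""  # accumulated function-call text

    def handle_delta(text: str) -> None:
        nonlocal gathering, buffer
        if "\n\n" in text:
            if gathering:
                # Closing delimiter: the buffered span is a complete call.
                gathering = False
                on_function_call(buffer)
                buffer = ""  # reset so a later call starts fresh
            else:
                # Opening delimiter: start buffering instead of forwarding.
                gathering = True
        elif gathering:
            buffer += text
        else:
            on_text(text)

    return handle_delta


# Usage: feed streamed chunks through the handler.
handler = make_delta_handler(
    on_text=lambda t: print(f"text: {t!r}"),
    on_function_call=lambda c: logging.info("Function call: %s", c),
)
for chunk in ["Hello ", "world", "\n\n", "get_weather(", "'NYC')", "\n\n", "Bye"]:
    handler(chunk)

One deliberate difference: the sketch clears its buffer after emitting a call, whereas the patch leaves function_call populated, so a second call in the same run would append to the first. Both share the limitation that any ordinary text arriving in the same chunk as a delimiter is dropped, and in the patch the actual dispatch (send_asset) is still commented out.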