Commit

Merge pull request #192 from PrefectHQ/ensure-streaming
jlowin authored Apr 9, 2023
2 parents 5cabe79 + 7ad5789 commit 5022510
Showing 2 changed files with 7 additions and 4 deletions.

src/marvin/cli/tui.py (6 changes: 5 additions & 1 deletion)

@@ -656,7 +656,11 @@ async def get_bot_response(self, event: Input.Submitted) -> str:
             ),
         )

-        self.query_one("Conversation", Conversation)
+        # call once to populate the response in case there was any trouble
+        # streaming live
+        await self.stream_bot_response(
+            token_buffer=[response.content], response=bot_response
+        )

         # if this is one of the first few responses, rename the thread
         # appropriately
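
The tui.py change above drops the stray query_one lookup and instead replays the finished response through stream_bot_response as a single-element token buffer, so the conversation widget ends up with the full text even when live streaming was interrupted. A minimal sketch of that fallback pattern follows; stream_bot_response here is a toy coroutine standing in for Marvin's Textual handler, not the repository's code:

import asyncio


async def stream_bot_response(token_buffer: list[str], response: dict) -> None:
    # stand-in for the TUI update: render whatever tokens we currently have
    response["displayed"] = "".join(token_buffer)


async def main() -> None:
    full_content = "Hello from the bot"
    bot_response: dict = {}

    # live streaming may have only partially updated the widget...
    await stream_bot_response(token_buffer=["Hello fr"], response=bot_response)

    # ...so call once more with the complete content as a one-element buffer,
    # mirroring the fallback added in tui.py above
    await stream_bot_response(token_buffer=[full_content], response=bot_response)
    assert bot_response["displayed"] == full_content


asyncio.run(main())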

src/marvin/utilities/llms.py (5 changes: 2 additions & 3 deletions)

@@ -51,10 +51,9 @@ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
         """Run on new LLM token. Only available when streaming is enabled."""
         self.buffer.append(token)
         if self.on_token_callback is not None:
-            if inspect.iscoroutinefunction(self.on_token_callback):
+            output = self.on_token_callback(self.buffer)
+            if inspect.iscoroutine(output):
                 asyncio.run(self.on_token_callback(self.buffer))
-            else:
-                self.on_token_callback(self.buffer)

     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
         """Run when LLM ends running."""
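
The llms.py change stops inferring the callback's type with inspect.iscoroutinefunction and instead looks at what the call actually returned: if invoking on_token_callback produced a coroutine, it still needs to be driven with asyncio.run; a plain function has already done its work. Below is a minimal sketch of that pattern outside the streaming callback handler; emit_tokens is a hypothetical helper, and the sketch runs the returned coroutine directly, a slight simplification of the diff (which re-invokes the callback inside asyncio.run):

import asyncio
import inspect


def emit_tokens(tokens, on_token_callback=None):
    # sketch of the handler logic: append each token, then notify the callback
    buffer = []
    for token in tokens:
        buffer.append(token)
        if on_token_callback is not None:
            output = on_token_callback(buffer)
            # a sync callback has already run; an async one returned a
            # coroutine that still needs to be driven
            if inspect.iscoroutine(output):
                asyncio.run(output)


# works with a plain function...
emit_tokens("hi!", on_token_callback=lambda buf: print("sync:", "".join(buf)))


# ...and with an async callback, without checking the callback's type up front
async def async_printer(buf):
    print("async:", "".join(buf))

emit_tokens("hi!", on_token_callback=async_printer)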
