Add Console function to stream result to pretty print console output (#4115)

Eric Zhu 2024-11-08 19:02:19 -08:00 committed by GitHub
parent 3f28aa8874
commit f40b0c2730
5 changed files with 63 additions and 15 deletions


@@ -109,11 +109,11 @@ and running on your machine.
 ```python
 import asyncio
 
-from autogen_ext.code_executor.docker_executor import DockerCommandLineCodeExecutor
+from autogen_ext.code_executors import DockerCommandLineCodeExecutor
 from autogen_ext.models import OpenAIChatCompletionClient
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
 from autogen_agentchat.teams import RoundRobinGroupChat
-from autogen_agentchat.task import TextMentionTermination
+from autogen_agentchat.task import TextMentionTermination, Console
 
 async def main() -> None:
     async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
@@ -126,8 +126,7 @@ async def main() -> None:
         stream = group_chat.run_stream(
             task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'."
         )
-        async for message in stream:
-            print(message)
+        await Console(stream)
 
 asyncio.run(main())
 ```
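Read together, the two hunks above replace the manual `async for` loop with a single `await Console(stream)`. For reference, here is the updated example reconstructed from the diff; the agent and team setup between the two hunks is not part of this diff, so it stays elided:

```python
import asyncio

from autogen_ext.code_executors import DockerCommandLineCodeExecutor
from autogen_ext.models import OpenAIChatCompletionClient
from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.task import TextMentionTermination, Console

async def main() -> None:
    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
        # ... agent and group_chat setup elided in this diff ...
        stream = group_chat.run_stream(
            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'."
        )
        # Console consumes the stream, printing each message and a final summary.
        await Console(stream)

asyncio.run(main())
```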


@@ -70,7 +70,7 @@ ChatMessage = TextMessage | MultiModalMessage | StopMessage | HandoffMessage
 """Messages for agent-to-agent communication."""
 
-AgentMessage = InnerMessage | ChatMessage
+AgentMessage = TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallMessage | ToolCallResultMessage
 """All message types."""


@ -1,3 +1,4 @@
from ._console import Console
from ._terminations import MaxMessageTermination, StopMessageTermination, TextMentionTermination, TokenUsageTermination from ._terminations import MaxMessageTermination, StopMessageTermination, TextMentionTermination, TokenUsageTermination
__all__ = [ __all__ = [
@ -5,4 +6,5 @@ __all__ = [
"TextMentionTermination", "TextMentionTermination",
"StopMessageTermination", "StopMessageTermination",
"TokenUsageTermination", "TokenUsageTermination",
"Console",
] ]


@@ -0,0 +1,35 @@
+import sys
+import time
+from typing import AsyncGenerator
+
+from autogen_core.components.models import RequestUsage
+
+from autogen_agentchat.base import TaskResult
+from autogen_agentchat.messages import AgentMessage
+
+
+async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> None:
+    """Consume the stream from :meth:`~autogen_agentchat.teams.Team.run_stream`
+    and print the messages to the console."""
+    start_time = time.time()
+    total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0)
+    async for message in stream:
+        if isinstance(message, TaskResult):
+            duration = time.time() - start_time
+            output = (
+                f"{'-' * 10} Summary {'-' * 10}\n"
+                f"Number of messages: {len(message.messages)}\n"
+                f"Finish reason: {message.stop_reason}\n"
+                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
+                f"Total completion tokens: {total_usage.completion_tokens}\n"
+                f"Duration: {duration:.2f} seconds\n"
+            )
+            sys.stdout.write(output)
+        else:
+            output = f"{'-' * 10} {message.source} {'-' * 10}\n{message.content}\n"
+            if message.models_usage:
+                output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n"
+                total_usage.completion_tokens += message.models_usage.completion_tokens
+                total_usage.prompt_tokens += message.models_usage.prompt_tokens
+            sys.stdout.write(output)
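The new function can be exercised without a live team by handing it a hand-built stream. A minimal sketch, assuming the `TextMessage`, `RequestUsage`, and `TaskResult` constructors that appear in the notebook output below; `fake_stream` is a hypothetical stand-in for `Team.run_stream()`:

```python
import asyncio
from typing import AsyncGenerator

from autogen_core.components.models import RequestUsage
from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import AgentMessage, TextMessage
from autogen_agentchat.task import Console


async def fake_stream() -> AsyncGenerator[AgentMessage | TaskResult, None]:
    # Hypothetical stand-in for Team.run_stream(): one message, then the result.
    msg = TextMessage(
        source="assistant",
        models_usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
        content="Hello!",
    )
    yield msg
    yield TaskResult(messages=[msg], stop_reason="demo complete")


# Prints the message block, its token-usage line, and the final summary.
asyncio.run(Console(fake_stream()))
```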


@@ -35,18 +35,31 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "source='user' models_usage=None content='What is the weather in New York?'\n",
-     "source='weather_agent' models_usage=RequestUsage(prompt_tokens=79, completion_tokens=15) content=[FunctionCall(id='call_CntvzLVL7iYJwPP2WWeBKNHc', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
-     "source='weather_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_CntvzLVL7iYJwPP2WWeBKNHc')]\n",
-     "source='weather_agent' models_usage=RequestUsage(prompt_tokens=90, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.'\n",
-     "source='weather_agent' models_usage=RequestUsage(prompt_tokens=137, completion_tokens=4) content='TERMINATE'\n",
-     "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=79, completion_tokens=15), content=[FunctionCall(id='call_CntvzLVL7iYJwPP2WWeBKNHc', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='weather_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_CntvzLVL7iYJwPP2WWeBKNHc')]), TextMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=90, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.'), TextMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=137, completion_tokens=4), content='TERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")\n"
+     "---------- user ----------\n",
+     "What is the weather in New York?\n",
+     "---------- weather_agent ----------\n",
+     "[FunctionCall(id='call_AhTZ2q3TNL8x0qs00e3wIZ7y', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
+     "[Prompt tokens: 79, Completion tokens: 15]\n",
+     "---------- weather_agent ----------\n",
+     "[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_AhTZ2q3TNL8x0qs00e3wIZ7y')]\n",
+     "---------- weather_agent ----------\n",
+     "The weather in New York is currently 73 degrees and sunny.\n",
+     "[Prompt tokens: 90, Completion tokens: 14]\n",
+     "---------- weather_agent ----------\n",
+     "TERMINATE\n",
+     "[Prompt tokens: 137, Completion tokens: 4]\n",
+     "---------- Summary ----------\n",
+     "Number of messages: 5\n",
+     "Finish reason: Text 'TERMINATE' mentioned\n",
+     "Total prompt tokens: 306\n",
+     "Total completion tokens: 33\n",
+     "Duration: 1.43 seconds\n"
     ]
    }
   ],
   "source": [
    "from autogen_agentchat.agents import AssistantAgent\n",
-   "from autogen_agentchat.task import TextMentionTermination\n",
+   "from autogen_agentchat.task import Console, TextMentionTermination\n",
    "from autogen_agentchat.teams import RoundRobinGroupChat\n",
    "from autogen_ext.models import OpenAIChatCompletionClient\n",
    "\n",
@@ -72,8 +85,7 @@
    "\n",
    "    # Run the team and stream messages\n",
    "    stream = agent_team.run_stream(task=\"What is the weather in New York?\")\n",
-   "    async for response in stream:\n",
-   "        print(response)\n",
+   "    await Console(stream)\n",
    "\n",
    "\n",
    "# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).\n",
@@ -114,7 +126,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.6"
+   "version": "3.11.5"
   }
  },
 "nbformat": 4,