mirror of https://github.com/microsoft/autogen.git
Define AgentEvent, rename tool call messages to events. (#4750)
* Define AgentEvent, rename tool call messages to events.
* update doc
* Use AgentEvent | ChatMessage to replace AgentMessage
* Update docs
* update deprecation notice
* remove unused
* fix doc
* format
This commit is contained in:
parent 7a7eb7449a
commit e902e94b14
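In short, the catch-all AgentMessage union is split into ChatMessage (agent-to-agent communication) and AgentEvent (observational events). A minimal migration sketch, assuming the post-commit API; AgentMessage remains importable but deprecated:

# Before: one union covered both communication and events.
from autogen_agentchat.messages import AgentMessage  # deprecated by this commit


def handle_old(messages: list[AgentMessage]) -> None: ...


# After: annotate with the explicit split instead.
from autogen_agentchat.messages import AgentEvent, ChatMessage


def handle_new(messages: list[AgentEvent | ChatMessage]) -> None: ...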
@@ -21,13 +21,13 @@ from .. import EVENT_LOGGER_NAME
 from ..base import Handoff as HandoffBase
 from ..base import Response
 from ..messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     HandoffMessage,
     MultiModalMessage,
     TextMessage,
-    ToolCallMessage,
-    ToolCallResultMessage,
+    ToolCallExecutionEvent,
+    ToolCallRequestEvent,
 )
 from ..state import AssistantAgentState
 from ._base_chat_agent import BaseChatAgent
@@ -292,7 +292,7 @@ class AssistantAgent(BaseChatAgent):

     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentMessage | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         # Add messages to the model context.
         for msg in messages:
             if isinstance(msg, MultiModalMessage) and self._model_client.capabilities["vision"] is False:
@@ -300,7 +300,7 @@ class AssistantAgent(BaseChatAgent):
             self._model_context.append(UserMessage(content=msg.content, source=msg.source))

         # Inner messages.
-        inner_messages: List[AgentMessage] = []
+        inner_messages: List[AgentEvent | ChatMessage] = []

         # Generate an inference result based on the current model context.
         llm_messages = self._system_messages + self._model_context
@@ -321,7 +321,7 @@ class AssistantAgent(BaseChatAgent):

         # Process tool calls.
         assert isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content)
-        tool_call_msg = ToolCallMessage(content=result.content, source=self.name, models_usage=result.usage)
+        tool_call_msg = ToolCallRequestEvent(content=result.content, source=self.name, models_usage=result.usage)
         event_logger.debug(tool_call_msg)
         # Add the tool call message to the output.
         inner_messages.append(tool_call_msg)
@@ -329,7 +329,7 @@ class AssistantAgent(BaseChatAgent):

         # Execute the tool calls.
         results = await asyncio.gather(*[self._execute_tool_call(call, cancellation_token) for call in result.content])
-        tool_call_result_msg = ToolCallResultMessage(content=results, source=self.name)
+        tool_call_result_msg = ToolCallExecutionEvent(content=results, source=self.name)
         event_logger.debug(tool_call_result_msg)
         self._model_context.append(FunctionExecutionResultMessage(content=results))
         inner_messages.append(tool_call_result_msg)

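For downstream consumers of AssistantAgent.on_messages_stream, the renamed types only change isinstance checks and annotations. A usage sketch under the new names (the assistant argument is assumed to be an AssistantAgent configured with tools):

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import ChatMessage, ToolCallExecutionEvent, ToolCallRequestEvent
from autogen_core import CancellationToken


async def consume(assistant: AssistantAgent, messages: list[ChatMessage]) -> None:
    async for item in assistant.on_messages_stream(messages, CancellationToken()):
        if isinstance(item, Response):
            print("final response:", item.chat_message.content)
        elif isinstance(item, (ToolCallRequestEvent, ToolCallExecutionEvent)):
            # Events are emitted for observation only; they are not
            # part of agent-to-agent communication.
            print("event:", item.type)
        else:
            print("inner chat message:", item)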
@@ -5,7 +5,7 @@ from autogen_core import CancellationToken

 from ..base import ChatAgent, Response, TaskResult
 from ..messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     TextMessage,
 )
@@ -58,7 +58,7 @@ class BaseChatAgent(ChatAgent, ABC):

     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentMessage | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         """Handles incoming messages and returns a stream of messages and
         and the final item is the response. The base implementation in
         :class:`BaseChatAgent` simply calls :meth:`on_messages` and yields
@@ -89,7 +89,7 @@ class BaseChatAgent(ChatAgent, ABC):
         if cancellation_token is None:
             cancellation_token = CancellationToken()
         input_messages: List[ChatMessage] = []
-        output_messages: List[AgentMessage] = []
+        output_messages: List[AgentEvent | ChatMessage] = []
         if task is None:
             pass
         elif isinstance(task, str):
@@ -119,13 +119,13 @@ class BaseChatAgent(ChatAgent, ABC):
         *,
         task: str | ChatMessage | List[ChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
         """Run the agent with the given task and return a stream of messages
         and the final task result as the last item in the stream."""
         if cancellation_token is None:
             cancellation_token = CancellationToken()
         input_messages: List[ChatMessage] = []
-        output_messages: List[AgentMessage] = []
+        output_messages: List[AgentEvent | ChatMessage] = []
         if task is None:
             pass
         elif isinstance(task, str):

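Callers of run_stream see the same widened union, with the TaskResult still arriving last. A consumption sketch, assuming any BaseChatAgent subclass:

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import TaskResult


async def run_and_collect(agent: BaseChatAgent) -> TaskResult:
    result: TaskResult | None = None
    async for item in agent.run_stream(task="Say hello."):
        if isinstance(item, TaskResult):
            result = item  # the final item in the stream
        # other items are AgentEvent | ChatMessage values
    assert result is not None
    return result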
@@ -8,7 +8,7 @@ from autogen_agentchat.state import SocietyOfMindAgentState

 from ..base import TaskResult, Team
 from ..messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     HandoffMessage,
     MultiModalMessage,
@@ -119,13 +119,13 @@ class SocietyOfMindAgent(BaseChatAgent):

     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentMessage | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         # Prepare the task for the team of agents.
         task = list(messages)

         # Run the team of agents.
         result: TaskResult | None = None
-        inner_messages: List[AgentMessage] = []
+        inner_messages: List[AgentEvent | ChatMessage] = []
         count = 0
         async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token):
             if isinstance(inner_msg, TaskResult):

@@ -3,7 +3,7 @@ from typing import Any, AsyncGenerator, List, Mapping, Protocol, Sequence, runti

 from autogen_core import CancellationToken

-from ..messages import AgentMessage, ChatMessage
+from ..messages import AgentEvent, ChatMessage
 from ._task import TaskRunner


@@ -14,7 +14,7 @@ class Response:
     chat_message: ChatMessage
     """A chat message produced by the agent as the response."""

-    inner_messages: List[AgentMessage] | None = None
+    inner_messages: List[AgentEvent | ChatMessage] | None = None
     """Inner messages produced by the agent."""


@@ -46,7 +46,7 @@ class ChatAgent(TaskRunner, Protocol):

     def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentMessage | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         """Handles incoming messages and returns a stream of inner messages and
         and the final item is the response."""
         ...

@@ -3,14 +3,14 @@ from typing import AsyncGenerator, List, Protocol, Sequence

 from autogen_core import CancellationToken

-from ..messages import AgentMessage, ChatMessage
+from ..messages import AgentEvent, ChatMessage


 @dataclass
 class TaskResult:
     """Result of running a task."""

-    messages: Sequence[AgentMessage]
+    messages: Sequence[AgentEvent | ChatMessage]
     """Messages produced by the task."""

     stop_reason: str | None = None
@@ -38,7 +38,7 @@ class TaskRunner(Protocol):
         *,
         task: str | ChatMessage | List[ChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
         """Run the task and produces a stream of messages and the final result
         :class:`TaskResult` as the last item in the stream.

@@ -2,7 +2,7 @@ import asyncio
 from abc import ABC, abstractmethod
 from typing import List, Sequence

-from ..messages import AgentMessage, StopMessage
+from ..messages import AgentEvent, ChatMessage, StopMessage


 class TerminatedException(BaseException): ...
@@ -50,7 +50,7 @@ class TerminationCondition(ABC):
         ...

     @abstractmethod
-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         """Check if the conversation should be terminated based on the messages received
         since the last time the condition was called.
         Return a StopMessage if the conversation should be terminated, or None otherwise.
@@ -88,7 +88,7 @@ class _AndTerminationCondition(TerminationCondition):
     def terminated(self) -> bool:
         return all(condition.terminated for condition in self._conditions)

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self.terminated:
             raise TerminatedException("Termination condition has already been reached.")
         # Check all remaining conditions.
@@ -120,7 +120,7 @@ class _OrTerminationCondition(TerminationCondition):
     def terminated(self) -> bool:
         return any(condition.terminated for condition in self._conditions)

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self.terminated:
             raise RuntimeError("Termination condition has already been reached")
         stop_messages = await asyncio.gather(*[condition(messages) for condition in self._conditions])

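Because the old parameter type was only an annotation, existing condition subclasses keep working at runtime; only their signatures need updating for type checkers. A sketch of a custom condition against the new signature (DoneTermination is a hypothetical example, not part of the library):

from typing import Sequence

from autogen_agentchat.base import TerminatedException, TerminationCondition
from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage, TextMessage


class DoneTermination(TerminationCondition):
    """Terminate when any text message contains 'DONE'."""

    def __init__(self) -> None:
        self._terminated = False

    @property
    def terminated(self) -> bool:
        return self._terminated

    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
        if self._terminated:
            raise TerminatedException("Termination condition has already been reached")
        for message in messages:
            if isinstance(message, TextMessage) and "DONE" in message.content:
                self._terminated = True
                return StopMessage(content="'DONE' mentioned", source="DoneTermination")
        return None

    async def reset(self) -> None:
        self._terminated = False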
@@ -2,7 +2,7 @@ import time
 from typing import List, Sequence

 from ..base import TerminatedException, TerminationCondition
-from ..messages import AgentMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage
+from ..messages import AgentEvent, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage


 class StopMessageTermination(TerminationCondition):
@@ -15,7 +15,7 @@ class StopMessageTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._terminated

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")
         for message in messages:
@@ -43,7 +43,7 @@ class MaxMessageTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._message_count >= self._max_messages

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self.terminated:
             raise TerminatedException("Termination condition has already been reached")
         self._message_count += len(messages)
@@ -73,7 +73,7 @@ class TextMentionTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._terminated

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")
         for message in messages:
@@ -128,7 +128,7 @@ class TokenUsageTermination(TerminationCondition):
             or (self._max_completion_token is not None and self._completion_token_count >= self._max_completion_token)
         )

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self.terminated:
             raise TerminatedException("Termination condition has already been reached")
         for message in messages:
@@ -163,7 +163,7 @@ class HandoffTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._terminated

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")
         for message in messages:
@@ -194,7 +194,7 @@ class TimeoutTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._terminated

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")

@@ -242,7 +242,7 @@ class ExternalTermination(TerminationCondition):
         """Set the termination condition to terminated."""
         self._setted = True

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")
         if self._setted:
@@ -273,7 +273,7 @@ class SourceMatchTermination(TerminationCondition):
     def terminated(self) -> bool:
         return self._terminated

-    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
+    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:
         if self._terminated:
             raise TerminatedException("Termination condition has already been reached")
         if not messages:

@@ -9,7 +9,7 @@ from typing import List, Literal
 from autogen_core import FunctionCall, Image
 from autogen_core.models import FunctionExecutionResult, RequestUsage
 from pydantic import BaseModel, ConfigDict, Field
-from typing_extensions import Annotated
+from typing_extensions import Annotated, deprecated


 class BaseMessage(BaseModel):
@@ -63,6 +63,7 @@ class HandoffMessage(BaseMessage):
     type: Literal["HandoffMessage"] = "HandoffMessage"


+@deprecated("Will be removed in 0.4.0, use ToolCallRequestEvent instead.")
 class ToolCallMessage(BaseMessage):
     """A message signaling the use of tools."""

@@ -72,6 +73,7 @@ class ToolCallMessage(BaseMessage):
     type: Literal["ToolCallMessage"] = "ToolCallMessage"


+@deprecated("Will be removed in 0.4.0, use ToolCallExecutionEvent instead.")
 class ToolCallResultMessage(BaseMessage):
     """A message signaling the results of tool calls."""

@@ -81,15 +83,37 @@ class ToolCallResultMessage(BaseMessage):
     type: Literal["ToolCallResultMessage"] = "ToolCallResultMessage"


+class ToolCallRequestEvent(BaseMessage):
+    """An event signaling a request to use tools."""
+
+    content: List[FunctionCall]
+    """The tool calls."""
+
+    type: Literal["ToolCallRequestEvent"] = "ToolCallRequestEvent"
+
+
+class ToolCallExecutionEvent(BaseMessage):
+    """An event signaling the execution of tool calls."""
+
+    content: List[FunctionExecutionResult]
+    """The tool call results."""
+
+    type: Literal["ToolCallExecutionEvent"] = "ToolCallExecutionEvent"
+
+
 ChatMessage = Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage, Field(discriminator="type")]
-"""Messages for agent-to-agent communication."""
+"""Messages for agent-to-agent communication only."""
+
+
+AgentEvent = Annotated[ToolCallRequestEvent | ToolCallExecutionEvent, Field(discriminator="type")]
+"""Events emitted by agents and teams when they work, not used for agent-to-agent communication."""


 AgentMessage = Annotated[
-    TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallMessage | ToolCallResultMessage,
+    TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallRequestEvent | ToolCallExecutionEvent,
     Field(discriminator="type"),
 ]
-"""All message types."""
+"""(Deprecated, will be removed in 0.4.0) All message and event types."""


 __all__ = [
@@ -98,8 +122,11 @@ __all__ = [
     "MultiModalMessage",
     "StopMessage",
     "HandoffMessage",
+    "ToolCallRequestEvent",
+    "ToolCallExecutionEvent",
     "ToolCallMessage",
     "ToolCallResultMessage",
     "ChatMessage",
+    "AgentEvent",
     "AgentMessage",
 ]

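The new event classes share the source and models_usage fields of BaseMessage and carry the discriminating type literal, so constructing them directly mirrors the old message classes. A minimal sketch with made-up call data:

from autogen_core import FunctionCall
from autogen_core.models import FunctionExecutionResult

from autogen_agentchat.messages import ToolCallExecutionEvent, ToolCallRequestEvent

request = ToolCallRequestEvent(
    source="assistant",
    content=[FunctionCall(id="1", arguments='{"query": "AutoGen"}', name="web_search")],
)
execution = ToolCallExecutionEvent(
    source="assistant",
    content=[FunctionExecutionResult(content="AutoGen is a framework.", call_id="1")],
)
# The `type` field drives the Pydantic discriminated unions defined above.
assert request.type == "ToolCallRequestEvent"
assert execution.type == "ToolCallExecutionEvent"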
@@ -6,7 +6,7 @@ from autogen_core.models import (
 from pydantic import BaseModel, Field

 from ..messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
 )

@@ -36,7 +36,7 @@ class TeamState(BaseState):
 class BaseGroupChatManagerState(BaseState):
     """Base state for all group chat managers."""

-    message_thread: List[AgentMessage] = Field(default_factory=list)
+    message_thread: List[AgentEvent | ChatMessage] = Field(default_factory=list)
     current_turn: int = Field(default=0)
     type: str = Field(default="BaseGroupChatManagerState")

@@ -27,39 +27,39 @@ from ..conditions import (
 from ..conditions import (
     TokenUsageTermination as TokenUsageTerminationAlias,
 )
-from ..messages import AgentMessage
+from ..messages import AgentEvent, ChatMessage
 from ..ui import Console as ConsoleAlias


-@deprecated("Moved to autogen_agentchat.terminations.ExternalTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.ExternalTermination. Will remove this in 0.4.0.", stacklevel=2)
 class ExternalTermination(ExternalTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.HandoffTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.HandoffTermination. Will remove this in 0.4.0.", stacklevel=2)
 class HandoffTermination(HandoffTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.MaxMessageTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.MaxMessageTermination. Will remove this in 0.4.0.", stacklevel=2)
 class MaxMessageTermination(MaxMessageTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.SourceMatchTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.SourceMatchTermination. Will remove this in 0.4.0.", stacklevel=2)
 class SourceMatchTermination(SourceMatchTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.StopMessageTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.StopMessageTermination. Will remove this in 0.4.0.", stacklevel=2)
 class StopMessageTermination(StopMessageTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.TextMentionTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.TextMentionTermination. Will remove this in 0.4.0.", stacklevel=2)
 class TextMentionTermination(TextMentionTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.TimeoutTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.TimeoutTermination. Will remove this in 0.4.0.", stacklevel=2)
 class TimeoutTermination(TimeoutTerminationAlias): ...


-@deprecated("Moved to autogen_agentchat.terminations.TokenUsageTermination. Will remove this in 0.4.0.", stacklevel=2)
+@deprecated("Moved to autogen_agentchat.conditions.TokenUsageTermination. Will remove this in 0.4.0.", stacklevel=2)
 class TokenUsageTermination(TokenUsageTerminationAlias): ...


@@ -68,7 +68,7 @@ T = TypeVar("T", bound=TaskResult | Response)

 @deprecated("Moved to autogen_agentchat.ui.Console. Will remove this in 0.4.0.", stacklevel=2)
 async def Console(
-    stream: AsyncGenerator[AgentMessage | T, None],
+    stream: AsyncGenerator[AgentEvent | ChatMessage | T, None],
     *,
     no_inline_images: bool = False,
 ) -> T:

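The docstring corrections above matter for anyone following the deprecation pointers: the replacement classes live in autogen_agentchat.conditions, not a terminations module. For example:

# Deprecated location, kept as an alias for now (emits DeprecationWarning):
from autogen_agentchat.task import MaxMessageTermination  # noqa: F401

# Location the corrected notices point to:
from autogen_agentchat.conditions import MaxMessageTermination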
@@ -19,7 +19,7 @@ from autogen_core._closure_agent import ClosureContext

 from ... import EVENT_LOGGER_NAME
 from ...base import ChatAgent, TaskResult, Team, TerminationCondition
-from ...messages import AgentMessage, ChatMessage, TextMessage
+from ...messages import AgentEvent, ChatMessage, TextMessage
 from ...state import TeamState
 from ._chat_agent_container import ChatAgentContainer
 from ._events import GroupChatMessage, GroupChatReset, GroupChatStart, GroupChatTermination
@@ -62,7 +62,7 @@ class BaseGroupChat(Team, ABC):

         # Constants for the closure agent to collect the output messages.
         self._stop_reason: str | None = None
-        self._output_message_queue: asyncio.Queue[AgentMessage | None] = asyncio.Queue()
+        self._output_message_queue: asyncio.Queue[AgentEvent | ChatMessage | None] = asyncio.Queue()

         # Create a runtime for the team.
         # TODO: The runtime should be created by a managed context.
@@ -273,7 +273,7 @@ class BaseGroupChat(Team, ABC):
         *,
         task: str | ChatMessage | List[ChatMessage] | None = None,
         cancellation_token: CancellationToken | None = None,
-    ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None]:
         """Run the team and produces a stream of messages and the final result
         of the type :class:`TaskResult` as the last item in the stream. Once the
         team is stopped, the termination condition is reset.
@@ -405,7 +405,7 @@ class BaseGroupChat(Team, ABC):
             cancellation_token=cancellation_token,
         )
         # Collect the output messages in order.
-        output_messages: List[AgentMessage] = []
+        output_messages: List[AgentEvent | ChatMessage] = []
         # Yield the messsages until the queue is empty.
         while True:
             message_future = asyncio.ensure_future(self._output_message_queue.get())

@@ -5,7 +5,7 @@ from typing import Any, List
 from autogen_core import DefaultTopicId, MessageContext, event, rpc

 from ...base import TerminationCondition
-from ...messages import AgentMessage, ChatMessage, StopMessage
+from ...messages import AgentEvent, ChatMessage, StopMessage
 from ._events import (
     GroupChatAgentResponse,
     GroupChatRequestPublish,
@@ -48,7 +48,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC):
             raise ValueError("The group topic type must not be in the participant topic types.")
         self._participant_topic_types = participant_topic_types
         self._participant_descriptions = participant_descriptions
-        self._message_thread: List[AgentMessage] = []
+        self._message_thread: List[AgentEvent | ChatMessage] = []
         self._termination_condition = termination_condition
         if max_turns is not None and max_turns <= 0:
             raise ValueError("The maximum number of turns must be greater than 0.")
@@ -115,7 +115,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC):
     @event
     async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None:
         # Append the message to the message thread and construct the delta.
-        delta: List[AgentMessage] = []
+        delta: List[AgentEvent | ChatMessage] = []
         if message.agent_response.inner_messages is not None:
             for inner_message in message.agent_response.inner_messages:
                 self._message_thread.append(inner_message)
@@ -180,7 +180,7 @@ class BaseGroupChatManager(SequentialRoutedAgent, ABC):
         ...

     @abstractmethod
-    async def select_speaker(self, thread: List[AgentMessage]) -> str:
+    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
         """Select a speaker from the participants and return the
         topic type of the selected speaker."""
         ...

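Custom managers overriding select_speaker only need the widened parameter annotation. A trivial override sketch (hypothetical and heavily elided; a real subclass of BaseGroupChatManager also passes the constructor arguments through):

from typing import List

from autogen_agentchat.messages import AgentEvent, ChatMessage


class PickLastSourceManager:  # sketch only, not a full BaseGroupChatManager subclass
    _participant_topic_types: List[str]

    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
        # Fall back to the first participant when the thread is empty.
        if not thread:
            return self._participant_topic_types[0]
        # Otherwise let whoever spoke last speak again.
        return thread[-1].source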
@@ -3,7 +3,7 @@ from typing import List
 from pydantic import BaseModel

 from ...base import Response
-from ...messages import AgentMessage, ChatMessage, StopMessage
+from ...messages import AgentEvent, ChatMessage, StopMessage


 class GroupChatStart(BaseModel):
@@ -29,7 +29,7 @@ class GroupChatRequestPublish(BaseModel):
 class GroupChatMessage(BaseModel):
     """A message from a group chat."""

-    message: AgentMessage
+    message: AgentEvent | ChatMessage
     """The message that was published."""

@@ -13,14 +13,14 @@ from autogen_core.models import (
 from .... import TRACE_LOGGER_NAME
 from ....base import Response, TerminationCondition
 from ....messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     HandoffMessage,
     MultiModalMessage,
     StopMessage,
     TextMessage,
-    ToolCallMessage,
-    ToolCallResultMessage,
+    ToolCallExecutionEvent,
+    ToolCallRequestEvent,
 )
 from ....state import MagenticOneOrchestratorState
 from .._base_group_chat_manager import BaseGroupChatManager
@@ -167,7 +167,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager):

     @event
     async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None:  # type: ignore
-        delta: List[AgentMessage] = []
+        delta: List[AgentEvent | ChatMessage] = []
         if message.agent_response.inner_messages is not None:
             for inner_message in message.agent_response.inner_messages:
                 delta.append(inner_message)
@@ -210,7 +210,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager):
         self._n_rounds = orchestrator_state.n_rounds
         self._n_stalls = orchestrator_state.n_stalls

-    async def select_speaker(self, thread: List[AgentMessage]) -> str:
+    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
         """Not used in this orchestrator, we select next speaker in _orchestrate_step."""
         return ""

@@ -427,7 +427,7 @@ class MagenticOneOrchestrator(BaseGroupChatManager):
         """Convert the message thread to a context for the model."""
         context: List[LLMMessage] = []
         for m in self._message_thread:
-            if isinstance(m, ToolCallMessage | ToolCallResultMessage):
+            if isinstance(m, ToolCallRequestEvent | ToolCallExecutionEvent):
                 # Ignore tool call messages.
                 continue
             elif isinstance(m, StopMessage | HandoffMessage):

@@ -1,7 +1,7 @@
 from typing import Any, Callable, List, Mapping

 from ...base import ChatAgent, TerminationCondition
-from ...messages import AgentMessage, ChatMessage
+from ...messages import AgentEvent, ChatMessage
 from ...state import RoundRobinManagerState
 from ._base_group_chat import BaseGroupChat
 from ._base_group_chat_manager import BaseGroupChatManager
@@ -53,7 +53,7 @@ class RoundRobinGroupChatManager(BaseGroupChatManager):
         self._current_turn = round_robin_state.current_turn
         self._next_speaker_index = round_robin_state.next_speaker_index

-    async def select_speaker(self, thread: List[AgentMessage]) -> str:
+    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
         """Select a speaker from the participants in a round-robin fashion."""
         current_speaker_index = self._next_speaker_index
         self._next_speaker_index = (current_speaker_index + 1) % len(self._participant_topic_types)

@@ -7,14 +7,14 @@ from autogen_core.models import ChatCompletionClient, SystemMessage
 from ... import TRACE_LOGGER_NAME
 from ...base import ChatAgent, TerminationCondition
 from ...messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     HandoffMessage,
     MultiModalMessage,
     StopMessage,
     TextMessage,
-    ToolCallMessage,
-    ToolCallResultMessage,
+    ToolCallExecutionEvent,
+    ToolCallRequestEvent,
 )
 from ...state import SelectorManagerState
 from ._base_group_chat import BaseGroupChat
@@ -38,7 +38,7 @@ class SelectorGroupChatManager(BaseGroupChatManager):
         model_client: ChatCompletionClient,
         selector_prompt: str,
         allow_repeated_speaker: bool,
-        selector_func: Callable[[Sequence[AgentMessage]], str | None] | None,
+        selector_func: Callable[[Sequence[AgentEvent | ChatMessage]], str | None] | None,
     ) -> None:
         super().__init__(
             group_topic_type,
@@ -78,7 +78,7 @@ class SelectorGroupChatManager(BaseGroupChatManager):
         self._current_turn = selector_state.current_turn
         self._previous_speaker = selector_state.previous_speaker

-    async def select_speaker(self, thread: List[AgentMessage]) -> str:
+    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
         """Selects the next speaker in a group chat using a ChatCompletion client,
         with the selector function as override if it returns a speaker name.

@@ -95,7 +95,7 @@ class SelectorGroupChatManager(BaseGroupChatManager):
         # Construct the history of the conversation.
         history_messages: List[str] = []
         for msg in thread:
-            if isinstance(msg, ToolCallMessage | ToolCallResultMessage):
+            if isinstance(msg, ToolCallRequestEvent | ToolCallExecutionEvent):
                 # Ignore tool call messages.
                 continue
             # The agent type must be the same as the topic type, which we use as the agent name.
@@ -204,7 +204,7 @@ class SelectorGroupChat(BaseGroupChat):
             Must contain '{roles}', '{participants}', and '{history}' to be filled in.
         allow_repeated_speaker (bool, optional): Whether to allow the same speaker to be selected
             consecutively. Defaults to False.
-        selector_func (Callable[[Sequence[AgentMessage]], str | None], optional): A custom selector
+        selector_func (Callable[[Sequence[AgentEvent | ChatMessage]], str | None], optional): A custom selector
             function that takes the conversation history and returns the name of the next speaker.
             If provided, this function will be used to override the model to select the next speaker.
             If the function returns None, the model will be used to select the next speaker.
@@ -278,7 +278,7 @@ class SelectorGroupChat(BaseGroupChat):
            from autogen_agentchat.teams import SelectorGroupChat
            from autogen_agentchat.conditions import TextMentionTermination
            from autogen_agentchat.ui import Console
-           from autogen_agentchat.messages import AgentMessage
+           from autogen_agentchat.messages import AgentEvent, ChatMessage


            async def main() -> None:
@@ -304,7 +304,7 @@ class SelectorGroupChat(BaseGroupChat):
                    system_message="Check the answer and respond with 'Correct!' or 'Incorrect!'",
                )

-               def selector_func(messages: Sequence[AgentMessage]) -> str | None:
+               def selector_func(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:
                    if len(messages) == 1 or messages[-1].content == "Incorrect!":
                        return "Agent1"
                    if messages[-1].source == "Agent1":
@@ -341,7 +341,7 @@ Read the following conversation. Then select the next role from {participants} t
 Read the above conversation. Then select the next role from {participants} to play. Only return the role.
 """,
         allow_repeated_speaker: bool = False,
-        selector_func: Callable[[Sequence[AgentMessage]], str | None] | None = None,
+        selector_func: Callable[[Sequence[AgentEvent | ChatMessage]], str | None] | None = None,
     ):
         super().__init__(
             participants,

@@ -1,7 +1,7 @@
 from typing import Any, Callable, List, Mapping

 from ...base import ChatAgent, TerminationCondition
-from ...messages import AgentMessage, ChatMessage, HandoffMessage
+from ...messages import AgentEvent, ChatMessage, HandoffMessage
 from ...state import SwarmManagerState
 from ._base_group_chat import BaseGroupChat
 from ._base_group_chat_manager import BaseGroupChatManager
@@ -64,7 +64,7 @@ class SwarmGroupChatManager(BaseGroupChatManager):
         await self._termination_condition.reset()
         self._current_speaker = self._participant_topic_types[0]

-    async def select_speaker(self, thread: List[AgentMessage]) -> str:
+    async def select_speaker(self, thread: List[AgentEvent | ChatMessage]) -> str:
         """Select a speaker from the participants based on handoff message.
         Looks for the last handoff message in the thread to determine the next speaker."""
         if len(thread) == 0:

@@ -7,7 +7,7 @@ from autogen_core import Image
 from autogen_core.models import RequestUsage

 from autogen_agentchat.base import Response, TaskResult
-from autogen_agentchat.messages import AgentMessage, MultiModalMessage
+from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage


 def _is_running_in_iterm() -> bool:
@@ -22,7 +22,7 @@ T = TypeVar("T", bound=TaskResult | Response)


 async def Console(
-    stream: AsyncGenerator[AgentMessage | T, None],
+    stream: AsyncGenerator[AgentEvent | ChatMessage | T, None],
     *,
     no_inline_images: bool = False,
 ) -> T:
@@ -32,7 +32,7 @@ async def Console(
     Returns the last processed TaskResult or Response.

     Args:
-        stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Message stream to render.
+        stream (AsyncGenerator[AgentEvent | ChatMessage | TaskResult, None] | AsyncGenerator[AgentEvent | ChatMessage | Response, None]): Message stream to render.
             This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`.
         no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False.

@@ -93,7 +93,7 @@ async def Console(

         else:
             # Cast required for mypy to be happy
-            message = cast(AgentMessage, message)  # type: ignore
+            message = cast(AgentEvent | ChatMessage, message)  # type: ignore
             output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n"
             if message.models_usage:
                 output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n"
@@ -114,7 +114,7 @@ def _image_to_iterm(image: Image) -> str:
     return f"\033]1337;File=inline=1:{image_data}\a\n"


-def _message_to_str(message: AgentMessage, *, render_image_iterm: bool = False) -> str:
+def _message_to_str(message: AgentEvent | ChatMessage, *, render_image_iterm: bool = False) -> str:
     if isinstance(message, MultiModalMessage):
         result: List[str] = []
         for c in message.content:

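Console keeps working unchanged with run_stream, since the widened signature matches what teams and agents now yield. A usage sketch, assuming a configured team:

from autogen_agentchat.base import TaskRunner
from autogen_agentchat.ui import Console


async def show(team: TaskRunner) -> None:
    # run_stream yields AgentEvent | ChatMessage items and a final TaskResult,
    # which matches Console's updated parameter type.
    await Console(team.run_stream(task="Write a short poem."))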
@@ -12,8 +12,8 @@ from autogen_agentchat.messages import (
     HandoffMessage,
     MultiModalMessage,
     TextMessage,
-    ToolCallMessage,
-    ToolCallResultMessage,
+    ToolCallExecutionEvent,
+    ToolCallRequestEvent,
 )
 from autogen_core import Image
 from autogen_core.tools import FunctionTool
@@ -136,11 +136,11 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None:
     assert len(result.messages) == 4
     assert isinstance(result.messages[0], TextMessage)
     assert result.messages[0].models_usage is None
-    assert isinstance(result.messages[1], ToolCallMessage)
+    assert isinstance(result.messages[1], ToolCallRequestEvent)
     assert result.messages[1].models_usage is not None
     assert result.messages[1].models_usage.completion_tokens == 5
     assert result.messages[1].models_usage.prompt_tokens == 10
-    assert isinstance(result.messages[2], ToolCallResultMessage)
+    assert isinstance(result.messages[2], ToolCallExecutionEvent)
     assert result.messages[2].models_usage is None
     assert isinstance(result.messages[3], TextMessage)
     assert result.messages[3].content == "pass"
@@ -235,11 +235,11 @@ async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) ->
     assert len(result.messages) == 4
     assert isinstance(result.messages[0], TextMessage)
     assert result.messages[0].models_usage is None
-    assert isinstance(result.messages[1], ToolCallMessage)
+    assert isinstance(result.messages[1], ToolCallRequestEvent)
     assert result.messages[1].models_usage is not None
     assert result.messages[1].models_usage.completion_tokens == 5
     assert result.messages[1].models_usage.prompt_tokens == 10
-    assert isinstance(result.messages[2], ToolCallResultMessage)
+    assert isinstance(result.messages[2], ToolCallExecutionEvent)
     assert result.messages[2].models_usage is None
     assert isinstance(result.messages[3], TextMessage)
     assert result.messages[3].content == "Hello"
@@ -323,11 +323,11 @@ async def test_handoffs(monkeypatch: pytest.MonkeyPatch) -> None:
     assert len(result.messages) == 4
     assert isinstance(result.messages[0], TextMessage)
     assert result.messages[0].models_usage is None
-    assert isinstance(result.messages[1], ToolCallMessage)
+    assert isinstance(result.messages[1], ToolCallRequestEvent)
     assert result.messages[1].models_usage is not None
     assert result.messages[1].models_usage.completion_tokens == 43
     assert result.messages[1].models_usage.prompt_tokens == 42
-    assert isinstance(result.messages[2], ToolCallResultMessage)
+    assert isinstance(result.messages[2], ToolCallExecutionEvent)
     assert result.messages[2].models_usage is None
     assert isinstance(result.messages[3], HandoffMessage)
     assert result.messages[3].content == handoff.message

|
@ -14,14 +14,14 @@ from autogen_agentchat.agents import (
|
|||
from autogen_agentchat.base import Handoff, Response, TaskResult
|
||||
from autogen_agentchat.conditions import HandoffTermination, MaxMessageTermination, TextMentionTermination
|
||||
from autogen_agentchat.messages import (
|
||||
AgentMessage,
|
||||
AgentEvent,
|
||||
ChatMessage,
|
||||
HandoffMessage,
|
||||
MultiModalMessage,
|
||||
StopMessage,
|
||||
TextMessage,
|
||||
ToolCallMessage,
|
||||
ToolCallResultMessage,
|
||||
ToolCallExecutionEvent,
|
||||
ToolCallRequestEvent,
|
||||
)
|
||||
from autogen_agentchat.teams import (
|
||||
RoundRobinGroupChat,
|
||||
|
@ -323,8 +323,8 @@ async def test_round_robin_group_chat_with_tools(monkeypatch: pytest.MonkeyPatch
|
|||
)
|
||||
assert len(result.messages) == 8
|
||||
assert isinstance(result.messages[0], TextMessage) # task
|
||||
assert isinstance(result.messages[1], ToolCallMessage) # tool call
|
||||
assert isinstance(result.messages[2], ToolCallResultMessage) # tool call result
|
||||
assert isinstance(result.messages[1], ToolCallRequestEvent) # tool call
|
||||
assert isinstance(result.messages[2], ToolCallExecutionEvent) # tool call result
|
||||
assert isinstance(result.messages[3], TextMessage) # tool use agent response
|
||||
assert isinstance(result.messages[4], TextMessage) # echo agent response
|
||||
assert isinstance(result.messages[5], TextMessage) # tool use agent response
|
||||
|
@ -747,7 +747,7 @@ async def test_selector_group_chat_custom_selector(monkeypatch: pytest.MonkeyPat
|
|||
agent3 = _EchoAgent("agent3", description="echo agent 3")
|
||||
agent4 = _EchoAgent("agent4", description="echo agent 4")
|
||||
|
||||
def _select_agent(messages: Sequence[AgentMessage]) -> str | None:
|
||||
def _select_agent(messages: Sequence[AgentEvent | ChatMessage]) -> str | None:
|
||||
if len(messages) == 0:
|
||||
return "agent1"
|
||||
elif messages[-1].source == "agent1":
|
||||
|
@ -920,8 +920,8 @@ async def test_swarm_handoff_using_tool_calls(monkeypatch: pytest.MonkeyPatch) -
|
|||
result = await team.run(task="task")
|
||||
assert len(result.messages) == 7
|
||||
assert result.messages[0].content == "task"
|
||||
assert isinstance(result.messages[1], ToolCallMessage)
|
||||
assert isinstance(result.messages[2], ToolCallResultMessage)
|
||||
assert isinstance(result.messages[1], ToolCallRequestEvent)
|
||||
assert isinstance(result.messages[2], ToolCallExecutionEvent)
|
||||
assert result.messages[3].content == "handoff to agent2"
|
||||
assert result.messages[4].content == "Transferred to agent1."
|
||||
assert result.messages[5].content == "Hello"
|
||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -12,7 +12,7 @@
     "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n",
     "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n",
     "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of {py:class}`~autogen_agentchat.messages.ChatMessage` get a {py:class}`~autogen_agentchat.base.Response`.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n",
+    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n",
     "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n",
     "\n",
     "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n",
@@ -74,7 +74,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[ToolCallMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15), content=[FunctionCall(id='call_hqVC7UJUPhKaiJwgVKkg66ak', arguments='{\"query\":\"AutoGen\"}', name='web_search')]), ToolCallResultMessage(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_hqVC7UJUPhKaiJwgVKkg66ak')])]\n",
+      "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15), content=[FunctionCall(id='call_hqVC7UJUPhKaiJwgVKkg66ak', arguments='{\"query\":\"AutoGen\"}', name='web_search')]), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_hqVC7UJUPhKaiJwgVKkg66ak')])]\n",
      "source='assistant' models_usage=RequestUsage(prompt_tokens=92, completion_tokens=14) content='AutoGen is a programming framework designed for building multi-agent applications.'\n"
     ]
    }

@ -1,313 +1,313 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom Agents\n",
|
||||
"\n",
|
||||
"You may have agents with behaviors that do not fall into a preset. \n",
|
||||
"In such cases, you can build custom agents.\n",
|
||||
"\n",
|
||||
"All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n",
|
||||
"class and implement the following abstract methods and attributes:\n",
|
||||
"\n",
|
||||
"- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n",
|
||||
"- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
|
||||
"- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n",
|
||||
"\n",
|
||||
"Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
|
||||
"uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
|
||||
"that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
|
||||
"yields all messages in the response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## CountDownAgent\n",
|
||||
"\n",
|
||||
"In this example, we create a simple agent that counts down from a given number to zero,\n",
|
||||
"and produces a stream of messages with the current count."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"3...\n",
|
||||
"2...\n",
|
||||
"1...\n",
|
||||
"Done!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import AsyncGenerator, List, Sequence\n",
|
||||
"\n",
|
||||
"from autogen_agentchat.agents import BaseChatAgent\n",
|
||||
"from autogen_agentchat.base import Response\n",
|
||||
"from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage\n",
|
||||
"from autogen_core import CancellationToken\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CountDownAgent(BaseChatAgent):\n",
|
||||
" def __init__(self, name: str, count: int = 3):\n",
|
||||
" super().__init__(name, \"A simple agent that counts down.\")\n",
|
||||
" self._count = count\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def produced_message_types(self) -> List[type[ChatMessage]]:\n",
|
||||
" return [TextMessage]\n",
|
||||
"\n",
|
||||
" async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
|
||||
" # Calls the on_messages_stream.\n",
|
||||
" response: Response | None = None\n",
|
||||
" async for message in self.on_messages_stream(messages, cancellation_token):\n",
|
||||
" if isinstance(message, Response):\n",
|
||||
" response = message\n",
|
||||
" assert response is not None\n",
|
||||
" return response\n",
|
||||
"\n",
|
||||
" async def on_messages_stream(\n",
|
||||
" self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n",
|
||||
" ) -> AsyncGenerator[AgentMessage | Response, None]:\n",
|
||||
" inner_messages: List[AgentMessage] = []\n",
|
||||
" for i in range(self._count, 0, -1):\n",
|
||||
" msg = TextMessage(content=f\"{i}...\", source=self.name)\n",
|
||||
" inner_messages.append(msg)\n",
|
||||
" yield msg\n",
|
||||
" # The response is returned at the end of the stream.\n",
|
||||
" # It contains the final message and all the inner messages.\n",
|
||||
" yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n",
|
||||
"\n",
|
||||
" async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def run_countdown_agent() -> None:\n",
|
||||
" # Create a countdown agent.\n",
|
||||
" countdown_agent = CountDownAgent(\"countdown\")\n",
|
||||
"\n",
|
||||
" # Run the agent with a given task and stream the response.\n",
|
||||
" async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n",
|
||||
" if isinstance(message, Response):\n",
|
||||
" print(message.chat_message.content)\n",
|
||||
" else:\n",
|
||||
" print(message.content)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(run_countdown_agent()) when running in a script.\n",
|
||||
"await run_countdown_agent()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ArithmeticAgent\n",
|
||||
"\n",
|
||||
"In this example, we create an agent class that can perform simple arithmetic operations\n",
|
||||
"on a given integer. Then, we will use different instances of this agent class\n",
|
||||
"in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n",
|
||||
"to transform a given integer into another integer by applying a sequence of arithmetic operations.\n",
|
||||
"\n",
|
||||
"The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n",
|
||||
"after applying an arithmetic operation to the integer.\n",
|
||||
"In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n",
|
||||
"and returns a response with the result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable, List, Sequence\n",
|
||||
"\n",
|
||||
"from autogen_agentchat.agents import BaseChatAgent\n",
|
||||
"from autogen_agentchat.base import Response\n",
|
||||
"from autogen_agentchat.conditions import MaxMessageTermination\n",
|
||||
"from autogen_agentchat.messages import ChatMessage\n",
|
||||
"from autogen_agentchat.teams import SelectorGroupChat\n",
|
||||
"from autogen_agentchat.ui import Console\n",
|
||||
"from autogen_core import CancellationToken\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ArithmeticAgent(BaseChatAgent):\n",
|
||||
" def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n",
|
||||
" super().__init__(name, description=description)\n",
|
||||
" self._operator_func = operator_func\n",
|
||||
" self._message_history: List[ChatMessage] = []\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def produced_message_types(self) -> List[type[ChatMessage]]:\n",
|
||||
" return [TextMessage]\n",
|
||||
"\n",
|
||||
" async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
|
||||
" # Update the message history.\n",
|
||||
" # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n",
|
||||
" self._message_history.extend(messages)\n",
|
||||
" # Parse the number in the last message.\n",
|
||||
" assert isinstance(self._message_history[-1], TextMessage)\n",
|
||||
" number = int(self._message_history[-1].content)\n",
|
||||
" # Apply the operator function to the number.\n",
|
||||
" result = self._operator_func(number)\n",
|
||||
" # Create a new message with the result.\n",
|
||||
" response_message = TextMessage(content=str(result), source=self.name)\n",
|
||||
" # Update the message history.\n",
|
||||
" self._message_history.append(response_message)\n",
|
||||
" # Return the response.\n",
|
||||
" return Response(chat_message=response_message)\n",
|
||||
"\n",
|
||||
" async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
|
||||
" pass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"The `on_messages` method may be called with an empty list of messages, in which\n",
|
||||
"case it means the agent was called previously and is now being called again,\n",
|
||||
"without any new messages from the caller. So it is important to keep a history\n",
|
||||
"of the previous messages received by the agent, and use that history to generate\n",
|
||||
"the response.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n",
|
||||
"\n",
|
||||
"- one that adds 1 to the input integer,\n",
|
||||
"- one that subtracts 1 from the input integer,\n",
|
||||
"- one that multiplies the input integer by 2,\n",
|
||||
"- one that divides the input integer by 2 and rounds down to the nearest integer, and\n",
|
||||
"- one that returns the input integer unchanged.\n",
|
||||
"\n",
|
||||
"We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n",
|
||||
"and set the appropriate selector settings:\n",
|
||||
"\n",
|
||||
"- allow the same agent to be selected consecutively to allow for repeated operations, and\n",
|
||||
"- customize the selector prompt to tailor the model's response to the specific task."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- user ----------\n",
|
||||
"Apply the operations to turn the given number into 25.\n",
|
||||
"---------- user ----------\n",
|
||||
"10\n",
|
||||
"---------- multiply_agent ----------\n",
|
||||
"20\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"21\n",
|
||||
"---------- multiply_agent ----------\n",
|
||||
"42\n",
|
||||
"---------- divide_agent ----------\n",
|
||||
"21\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"22\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"23\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"24\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"25\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 10\n",
|
||||
"Finish reason: Maximum number of messages 10 reached, current message count: 10\n",
|
||||
"Total prompt tokens: 0\n",
|
||||
"Total completion tokens: 0\n",
|
||||
"Duration: 2.40 seconds\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def run_number_agents() -> None:\n",
|
||||
" # Create agents for number operations.\n",
|
||||
" add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n",
|
||||
" multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n",
|
||||
" subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n",
|
||||
" divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n",
|
||||
" identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n",
|
||||
"\n",
|
||||
" # The termination condition is to stop after 10 messages.\n",
|
||||
" termination_condition = MaxMessageTermination(10)\n",
|
||||
"\n",
|
||||
" # Create a selector group chat.\n",
|
||||
" selector_group_chat = SelectorGroupChat(\n",
|
||||
" [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
|
||||
" termination_condition=termination_condition,\n",
|
||||
" allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n",
|
||||
" selector_prompt=(\n",
|
||||
" \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n",
|
||||
" \"Current conversation history:\\n{history}\\n\"\n",
|
||||
" \"Please select the most appropriate role for the next message, and only return the role name.\"\n",
|
||||
" ),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Run the selector group chat with a given task and stream the response.\n",
|
||||
" task: List[ChatMessage] = [\n",
|
||||
" TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n",
|
||||
" TextMessage(content=\"10\", source=\"user\"),\n",
|
||||
" ]\n",
|
||||
" stream = selector_group_chat.run_stream(task=task)\n",
|
||||
" await Console(stream)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(run_number_agents()) when running in a script.\n",
|
||||
"await run_number_agents()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"From the output, we can see that the agents have successfully transformed the input integer\n",
|
||||
"from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom Agents\n",
|
||||
"\n",
|
||||
"You may have agents with behaviors that do not fall into a preset. \n",
|
||||
"In such cases, you can build custom agents.\n",
|
||||
"\n",
|
||||
"All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n",
|
||||
"class and implement the following abstract methods and attributes:\n",
|
||||
"\n",
|
||||
"- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n",
|
||||
"- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
|
||||
"- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n",
|
||||
"\n",
|
||||
"Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
|
||||
"uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
|
||||
"that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
|
||||
"yields all messages in the response."
|
||||
]
|
||||
},
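{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before the concrete examples below, here is a minimal sketch of an agent that satisfies these three members. The `EchoAgent` name and its echo behavior are illustrative only:\n",
"\n",
"```python\n",
"from typing import List, Sequence\n",
"\n",
"from autogen_agentchat.agents import BaseChatAgent\n",
"from autogen_agentchat.base import Response\n",
"from autogen_agentchat.messages import ChatMessage, TextMessage\n",
"from autogen_core import CancellationToken\n",
"\n",
"\n",
"class EchoAgent(BaseChatAgent):\n",
"    \"\"\"Hypothetical agent that replies with the last message it received.\"\"\"\n",
"\n",
"    @property\n",
"    def produced_message_types(self) -> List[type[ChatMessage]]:\n",
"        return [TextMessage]\n",
"\n",
"    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
"        # Echo the last incoming message, or a placeholder when called with no new messages.\n",
"        content = str(messages[-1].content) if messages else \"(no new messages)\"\n",
"        return Response(chat_message=TextMessage(content=content, source=self.name))\n",
"\n",
"    async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
"        pass  # This toy agent keeps no state to reset.\n",
"\n",
"\n",
"# Usage: EchoAgent(\"echo\", \"An agent that echoes the last message it receives.\")\n",
"```"
]
},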
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## CountDownAgent\n",
|
||||
"\n",
|
||||
"In this example, we create a simple agent that counts down from a given number to zero,\n",
|
||||
"and produces a stream of messages with the current count."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"3...\n",
|
||||
"2...\n",
|
||||
"1...\n",
|
||||
"Done!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import AsyncGenerator, List, Sequence\n",
|
||||
"\n",
|
||||
"from autogen_agentchat.agents import BaseChatAgent\n",
|
||||
"from autogen_agentchat.base import Response\n",
|
||||
"from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage\n",
|
||||
"from autogen_core import CancellationToken\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CountDownAgent(BaseChatAgent):\n",
|
||||
" def __init__(self, name: str, count: int = 3):\n",
|
||||
" super().__init__(name, \"A simple agent that counts down.\")\n",
|
||||
" self._count = count\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def produced_message_types(self) -> List[type[ChatMessage]]:\n",
|
||||
" return [TextMessage]\n",
|
||||
"\n",
|
||||
" async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
|
||||
" # Calls the on_messages_stream.\n",
|
||||
" response: Response | None = None\n",
|
||||
" async for message in self.on_messages_stream(messages, cancellation_token):\n",
|
||||
" if isinstance(message, Response):\n",
|
||||
" response = message\n",
|
||||
" assert response is not None\n",
|
||||
" return response\n",
|
||||
"\n",
|
||||
" async def on_messages_stream(\n",
|
||||
" self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n",
|
||||
" ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:\n",
|
||||
" inner_messages: List[AgentEvent | ChatMessage] = []\n",
|
||||
" for i in range(self._count, 0, -1):\n",
|
||||
" msg = TextMessage(content=f\"{i}...\", source=self.name)\n",
|
||||
" inner_messages.append(msg)\n",
|
||||
" yield msg\n",
|
||||
" # The response is returned at the end of the stream.\n",
|
||||
" # It contains the final message and all the inner messages.\n",
|
||||
" yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n",
|
||||
"\n",
|
||||
" async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def run_countdown_agent() -> None:\n",
|
||||
" # Create a countdown agent.\n",
|
||||
" countdown_agent = CountDownAgent(\"countdown\")\n",
|
||||
"\n",
|
||||
" # Run the agent with a given task and stream the response.\n",
|
||||
" async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n",
|
||||
" if isinstance(message, Response):\n",
|
||||
" print(message.chat_message.content)\n",
|
||||
" else:\n",
|
||||
" print(message.content)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(run_countdown_agent()) when running in a script.\n",
|
||||
"await run_countdown_agent()"
|
||||
]
|
||||
},
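{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because `BaseChatAgent` also implements {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`, the same agent can be driven through the `Console` UI helper instead of calling `on_messages_stream` directly. A brief sketch, assuming the `CountDownAgent` defined above:\n",
"\n",
"```python\n",
"from autogen_agentchat.ui import Console\n",
"\n",
"# Stream the countdown messages to the console; this agent ignores the task content.\n",
"await Console(CountDownAgent(\"countdown\").run_stream(task=\"Count down from 3.\"))\n",
"```"
]
},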
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ArithmeticAgent\n",
|
||||
"\n",
|
||||
"In this example, we create an agent class that can perform simple arithmetic operations\n",
|
||||
"on a given integer. Then, we will use different instances of this agent class\n",
|
||||
"in a {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n",
|
||||
"to transform a given integer into another integer by applying a sequence of arithmetic operations.\n",
|
||||
"\n",
|
||||
"The `ArithmeticAgent` class takes an `operator_func` that takes an integer and returns an integer,\n",
|
||||
"after applying an arithmetic operation to the integer.\n",
|
||||
"In its `on_messages` method, it applies the `operator_func` to the integer in the input message,\n",
|
||||
"and returns a response with the result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable, List, Sequence\n",
|
||||
"\n",
|
||||
"from autogen_agentchat.agents import BaseChatAgent\n",
|
||||
"from autogen_agentchat.base import Response\n",
|
||||
"from autogen_agentchat.conditions import MaxMessageTermination\n",
|
||||
"from autogen_agentchat.messages import ChatMessage\n",
|
||||
"from autogen_agentchat.teams import SelectorGroupChat\n",
|
||||
"from autogen_agentchat.ui import Console\n",
|
||||
"from autogen_core import CancellationToken\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ArithmeticAgent(BaseChatAgent):\n",
|
||||
" def __init__(self, name: str, description: str, operator_func: Callable[[int], int]) -> None:\n",
|
||||
" super().__init__(name, description=description)\n",
|
||||
" self._operator_func = operator_func\n",
|
||||
" self._message_history: List[ChatMessage] = []\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def produced_message_types(self) -> List[type[ChatMessage]]:\n",
|
||||
" return [TextMessage]\n",
|
||||
"\n",
|
||||
" async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
|
||||
" # Update the message history.\n",
|
||||
" # NOTE: it is possible the messages is an empty list, which means the agent was selected previously.\n",
|
||||
" self._message_history.extend(messages)\n",
|
||||
" # Parse the number in the last message.\n",
|
||||
" assert isinstance(self._message_history[-1], TextMessage)\n",
|
||||
" number = int(self._message_history[-1].content)\n",
|
||||
" # Apply the operator function to the number.\n",
|
||||
" result = self._operator_func(number)\n",
|
||||
" # Create a new message with the result.\n",
|
||||
" response_message = TextMessage(content=str(result), source=self.name)\n",
|
||||
" # Update the message history.\n",
|
||||
" self._message_history.append(response_message)\n",
|
||||
" # Return the response.\n",
|
||||
" return Response(chat_message=response_message)\n",
|
||||
"\n",
|
||||
" async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
|
||||
" pass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{note}\n",
|
||||
"The `on_messages` method may be called with an empty list of messages, in which\n",
|
||||
"case it means the agent was called previously and is now being called again,\n",
|
||||
"without any new messages from the caller. So it is important to keep a history\n",
|
||||
"of the previous messages received by the agent, and use that history to generate\n",
|
||||
"the response.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with 5 instances of `ArithmeticAgent`:\n",
|
||||
"\n",
|
||||
"- one that adds 1 to the input integer,\n",
|
||||
"- one that subtracts 1 from the input integer,\n",
|
||||
"- one that multiplies the input integer by 2,\n",
|
||||
"- one that divides the input integer by 2 and rounds down to the nearest integer, and\n",
|
||||
"- one that returns the input integer unchanged.\n",
|
||||
"\n",
|
||||
"We then create a {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with these agents,\n",
|
||||
"and set the appropriate selector settings:\n",
|
||||
"\n",
|
||||
"- allow the same agent to be selected consecutively to allow for repeated operations, and\n",
|
||||
"- customize the selector prompt to tailor the model's response to the specific task."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- user ----------\n",
|
||||
"Apply the operations to turn the given number into 25.\n",
|
||||
"---------- user ----------\n",
|
||||
"10\n",
|
||||
"---------- multiply_agent ----------\n",
|
||||
"20\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"21\n",
|
||||
"---------- multiply_agent ----------\n",
|
||||
"42\n",
|
||||
"---------- divide_agent ----------\n",
|
||||
"21\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"22\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"23\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"24\n",
|
||||
"---------- add_agent ----------\n",
|
||||
"25\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 10\n",
|
||||
"Finish reason: Maximum number of messages 10 reached, current message count: 10\n",
|
||||
"Total prompt tokens: 0\n",
|
||||
"Total completion tokens: 0\n",
|
||||
"Duration: 2.40 seconds\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def run_number_agents() -> None:\n",
|
||||
" # Create agents for number operations.\n",
|
||||
" add_agent = ArithmeticAgent(\"add_agent\", \"Adds 1 to the number.\", lambda x: x + 1)\n",
|
||||
" multiply_agent = ArithmeticAgent(\"multiply_agent\", \"Multiplies the number by 2.\", lambda x: x * 2)\n",
|
||||
" subtract_agent = ArithmeticAgent(\"subtract_agent\", \"Subtracts 1 from the number.\", lambda x: x - 1)\n",
|
||||
" divide_agent = ArithmeticAgent(\"divide_agent\", \"Divides the number by 2 and rounds down.\", lambda x: x // 2)\n",
|
||||
" identity_agent = ArithmeticAgent(\"identity_agent\", \"Returns the number as is.\", lambda x: x)\n",
|
||||
"\n",
|
||||
" # The termination condition is to stop after 10 messages.\n",
|
||||
" termination_condition = MaxMessageTermination(10)\n",
|
||||
"\n",
|
||||
" # Create a selector group chat.\n",
|
||||
" selector_group_chat = SelectorGroupChat(\n",
|
||||
" [add_agent, multiply_agent, subtract_agent, divide_agent, identity_agent],\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
|
||||
" termination_condition=termination_condition,\n",
|
||||
" allow_repeated_speaker=True, # Allow the same agent to speak multiple times, necessary for this task.\n",
|
||||
" selector_prompt=(\n",
|
||||
" \"Available roles:\\n{roles}\\nTheir job descriptions:\\n{participants}\\n\"\n",
|
||||
" \"Current conversation history:\\n{history}\\n\"\n",
|
||||
" \"Please select the most appropriate role for the next message, and only return the role name.\"\n",
|
||||
" ),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Run the selector group chat with a given task and stream the response.\n",
|
||||
" task: List[ChatMessage] = [\n",
|
||||
" TextMessage(content=\"Apply the operations to turn the given number into 25.\", source=\"user\"),\n",
|
||||
" TextMessage(content=\"10\", source=\"user\"),\n",
|
||||
" ]\n",
|
||||
" stream = selector_group_chat.run_stream(task=task)\n",
|
||||
" await Console(stream)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(run_number_agents()) when running in a script.\n",
|
||||
"await run_number_agents()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"From the output, we can see that the agents have successfully transformed the input integer\n",
|
||||
"from 10 to 25 by choosing appropriate agents that apply the arithmetic operations in sequence."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
"At a high level, messages in AgentChat can be categorized into two types: agent-agent messages and an agent's internal events and messages.\n",
|
||||
"\n",
|
||||
"### Agent-Agent Messages\n",
|
||||
"AgentChat supports many message types for agent-to-agent communication. The most common one is the {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n",
|
||||
"AgentChat supports many message types for agent-to-agent communication. They belong to the union type {py:class}`~autogen_agentchat.messages.ChatMessage`. This message type allows both text and multimodal communication and subsumes other message types, such as {py:class}`~autogen_agentchat.messages.TextMessage` or {py:class}`~autogen_agentchat.messages.MultiModalMessage`.\n",
|
||||
"\n",
|
||||
"For example, the following code snippet demonstrates how to create a text message, which accepts a string content and a string source:"
|
||||
]
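A minimal sketch of that snippet (the exact cell is elided in this diff excerpt, so the values here are illustrative; the `TextMessage` constructor is the one used throughout these docs):

```python
from autogen_agentchat.messages import TextMessage

# A chat message with plain-string content and a source label.
text_message = TextMessage(content="Hi, there!", source="user")
```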
|
||||
|
@ -91,13 +91,13 @@
|
|||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Internal Events and Messages\n",
|
||||
"### Internal Events\n",
|
||||
"\n",
|
||||
"AgentChat also supports the concept of `inner_messages` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself.\n",
|
||||
"AgentChat also supports the concept of `events` - messages that are internal to an agent. These messages are used to communicate events and information on actions _within_ the agent itself, and belong to the union type {py:class}`~autogen_agentchat.messages.AgentEvent`.\n",
|
||||
"\n",
|
||||
"Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallMessage`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallResultMessage`, which contains the results of tool calls.\n",
|
||||
"Examples of these include {py:class}`~autogen_agentchat.messages.ToolCallRequestEvent`, which indicates that a request was made to call a tool, and {py:class}`~autogen_agentchat.messages.ToolCallExecutionEvent`, which contains the results of tool calls.\n",
|
||||
"\n",
|
||||
"Typically, these messages are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](./custom-agents.ipynb).\n",
|
||||
"Typically, events are created by the agent itself and are contained in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response` returned from {py:class}`~autogen_agentchat.base.ChatAgent.on_messages`. If you are building a custom agent and have events that you want to communicate to other entities (e.g., a UI), you can include these in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` field of the {py:class}`~autogen_agentchat.base.Response`. We will show examples of this in [Custom Agents](./custom-agents.ipynb).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"You can read about the full set of messages supported in AgentChat in the {py:mod}`~autogen_agentchat.messages` module. \n",
|
||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
|
@ -1,304 +1,304 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Termination \n",
|
||||
"\n",
|
||||
"In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n",
|
||||
"\n",
|
||||
"AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n",
|
||||
"\n",
|
||||
"A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
|
||||
"Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n",
|
||||
"\n",
|
||||
"Some important things to note about termination conditions: \n",
|
||||
"- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n",
|
||||
"- They can be combined using the AND and OR operators.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n",
|
||||
"{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n",
|
||||
"the termination condition is called after each agent responds.\n",
|
||||
"While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n",
|
||||
"So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Built-In Termination Conditions: \n",
|
||||
"1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n",
|
||||
"2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n",
|
||||
"3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n",
|
||||
"4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n",
|
||||
"5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n",
|
||||
"6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n",
|
||||
"7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n",
|
||||
"8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_agentchat.agents import AssistantAgent\n",
|
||||
"from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
|
||||
"from autogen_agentchat.teams import RoundRobinGroupChat\n",
|
||||
"from autogen_agentchat.ui import Console\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"model_client = OpenAIChatCompletionClient(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" temperature=1,\n",
|
||||
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the primary agent.\n",
|
||||
"primary_agent = AssistantAgent(\n",
|
||||
" \"primary\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"You are a helpful AI assistant.\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the critic agent.\n",
|
||||
"critic_agent = AssistantAgent(\n",
|
||||
" \"critic\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- user ----------\n",
|
||||
"Write a unique, Haiku about the weather in Paris\n",
|
||||
"---------- primary ----------\n",
|
||||
"Gentle rain whispers, \n",
|
||||
"Cobblestones glisten softly— \n",
|
||||
"Paris dreams in gray.\n",
|
||||
"[Prompt tokens: 30, Completion tokens: 19]\n",
|
||||
"---------- critic ----------\n",
|
||||
"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n",
|
||||
"\n",
|
||||
"For example:\n",
|
||||
"Soft rain whispers down, \n",
|
||||
"Cobblestones glisten softly — \n",
|
||||
"Paris dreams in gray.\n",
|
||||
"\n",
|
||||
"This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n",
|
||||
"[Prompt tokens: 70, Completion tokens: 120]\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 3\n",
|
||||
"Finish reason: Maximum number of messages 3 reached, current message count: 3\n",
|
||||
"Total prompt tokens: 100\n",
|
||||
"Total completion tokens: 139\n",
|
||||
"Duration: 3.34 seconds\n"
|
||||
]
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Termination \n",
|
||||
"\n",
|
||||
"In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n",
|
||||
"\n",
|
||||
"AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n",
|
||||
"\n",
|
||||
"A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
|
||||
"Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n",
|
||||
"\n",
|
||||
"Some important things to note about termination conditions: \n",
|
||||
"- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n",
|
||||
"- They can be combined using the AND and OR operators.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n",
|
||||
"{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n",
|
||||
"the termination condition is called after each agent responds.\n",
|
||||
"While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n",
|
||||
"So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n",
|
||||
"```"
|
||||
]
|
||||
},
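{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough sketch of that contract, here is a toy condition that stops once a given source has spoken (similar in spirit to the built-in {py:class}`~autogen_agentchat.conditions.SourceMatchTermination` listed below). It assumes the abstract interface exposes an async `__call__`, a `terminated` property, and an async `reset`, mirroring the built-in conditions; treat it as illustrative rather than a reference implementation:\n",
"\n",
"```python\n",
"from typing import Sequence\n",
"\n",
"from autogen_agentchat.base import TerminationCondition\n",
"from autogen_agentchat.messages import AgentEvent, ChatMessage, StopMessage\n",
"\n",
"\n",
"class SourceSpokeTermination(TerminationCondition):\n",
"    \"\"\"Toy condition: terminate after a message from `source` is seen.\"\"\"\n",
"\n",
"    def __init__(self, source: str) -> None:\n",
"        self._source = source\n",
"        self._terminated = False\n",
"\n",
"    @property\n",
"    def terminated(self) -> bool:\n",
"        return self._terminated\n",
"\n",
"    async def __call__(self, messages: Sequence[AgentEvent | ChatMessage]) -> StopMessage | None:\n",
"        # Only the delta of messages since the last call is passed in.\n",
"        for message in messages:\n",
"            if message.source == self._source:\n",
"                self._terminated = True\n",
"                return StopMessage(content=f\"{self._source} has spoken.\", source=\"SourceSpokeTermination\")\n",
"        return None\n",
"\n",
"    async def reset(self) -> None:\n",
"        self._terminated = False\n",
"```"
]
},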
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Built-In Termination Conditions: \n",
|
||||
"1. {py:class}`~autogen_agentchat.conditions.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n",
|
||||
"2. {py:class}`~autogen_agentchat.conditions.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n",
|
||||
"3. {py:class}`~autogen_agentchat.conditions.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n",
|
||||
"4. {py:class}`~autogen_agentchat.conditions.TimeoutTermination`: Stops after a specified duration in seconds.\n",
|
||||
"5. {py:class}`~autogen_agentchat.conditions.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n",
|
||||
"6. {py:class}`~autogen_agentchat.conditions.SourceMatchTermination`: Stops after a specific agent responds.\n",
|
||||
"7. {py:class}`~autogen_agentchat.conditions.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n",
|
||||
"8. {py:class}`~autogen_agentchat.conditions.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent."
|
||||
]
|
||||
},
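{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, {py:class}`~autogen_agentchat.conditions.ExternalTermination` can back a \"Stop\" button. A hedged sketch, assuming the `primary_agent` and `critic_agent` defined later in this notebook and that `ExternalTermination` exposes a `set()` method:\n",
"\n",
"```python\n",
"import asyncio\n",
"\n",
"from autogen_agentchat.conditions import ExternalTermination\n",
"from autogen_agentchat.teams import RoundRobinGroupChat\n",
"from autogen_agentchat.ui import Console\n",
"\n",
"external_termination = ExternalTermination()\n",
"team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=external_termination)\n",
"\n",
"run = asyncio.create_task(Console(team.run_stream(task=\"Write a short poem.\")))\n",
"await asyncio.sleep(1)  # e.g., the user clicks \"Stop\" here.\n",
"external_termination.set()  # Request termination; the run stops at the next condition check.\n",
"await run\n",
"```"
]
},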
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_agentchat.agents import AssistantAgent\n",
|
||||
"from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
|
||||
"from autogen_agentchat.teams import RoundRobinGroupChat\n",
|
||||
"from autogen_agentchat.ui import Console\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"model_client = OpenAIChatCompletionClient(\n",
|
||||
" model=\"gpt-4o\",\n",
|
||||
" temperature=1,\n",
|
||||
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the primary agent.\n",
|
||||
"primary_agent = AssistantAgent(\n",
|
||||
" \"primary\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"You are a helpful AI assistant.\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the critic agent.\n",
|
||||
"critic_agent = AssistantAgent(\n",
|
||||
" \"critic\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' to when your feedbacks are addressed.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- user ----------\n",
|
||||
"Write a unique, Haiku about the weather in Paris\n",
|
||||
"---------- primary ----------\n",
|
||||
"Gentle rain whispers, \n",
|
||||
"Cobblestones glisten softly— \n",
|
||||
"Paris dreams in gray.\n",
|
||||
"[Prompt tokens: 30, Completion tokens: 19]\n",
|
||||
"---------- critic ----------\n",
|
||||
"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n",
|
||||
"\n",
|
||||
"For example:\n",
|
||||
"Soft rain whispers down, \n",
|
||||
"Cobblestones glisten softly — \n",
|
||||
"Paris dreams in gray.\n",
|
||||
"\n",
|
||||
"This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n",
|
||||
"[Prompt tokens: 70, Completion tokens: 120]\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 3\n",
|
||||
"Finish reason: Maximum number of messages 3 reached, current message count: 3\n",
|
||||
"Total prompt tokens: 100\n",
|
||||
"Total completion tokens: 139\n",
|
||||
"Duration: 3.34 seconds\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"max_msg_termination = MaxMessageTermination(max_messages=3)\n",
|
||||
"round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(...) if you are running this script as a standalone script.\n",
|
||||
"await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- primary ----------\n",
|
||||
"Thank you for your feedback. Here is the revised Haiku:\n",
|
||||
"\n",
|
||||
"Soft rain whispers down, \n",
|
||||
"Cobblestones glisten softly — \n",
|
||||
"Paris dreams in gray.\n",
|
||||
"[Prompt tokens: 181, Completion tokens: 32]\n",
|
||||
"---------- critic ----------\n",
|
||||
"The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n",
|
||||
"\n",
|
||||
"APPROVE\n",
|
||||
"[Prompt tokens: 234, Completion tokens: 54]\n",
|
||||
"---------- primary ----------\n",
|
||||
"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n",
|
||||
"[Prompt tokens: 279, Completion tokens: 39]\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 3\n",
|
||||
"Finish reason: Maximum number of messages 3 reached, current message count: 3\n",
|
||||
"Total prompt tokens: 694\n",
|
||||
"Total completion tokens: 125\n",
|
||||
"Duration: 6.43 seconds\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Use asyncio.run(...) if you are running this script as a standalone script.\n",
|
||||
"await Console(round_robin_team.run_stream())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The team continued from where it left off, allowing the primary agent to respond to the feedback."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- user ----------\n",
|
||||
"Write a unique, Haiku about the weather in Paris\n",
|
||||
"---------- primary ----------\n",
|
||||
"Spring breeze gently hums, \n",
|
||||
"Cherry blossoms in full bloom— \n",
|
||||
"Paris wakes to life.\n",
|
||||
"[Prompt tokens: 467, Completion tokens: 19]\n",
|
||||
"---------- critic ----------\n",
|
||||
"The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\n",
|
||||
"\n",
|
||||
"APPROVE\n",
|
||||
"[Prompt tokens: 746, Completion tokens: 93]\n",
|
||||
"---------- Summary ----------\n",
|
||||
"Number of messages: 3\n",
|
||||
"Finish reason: Text 'APPROVE' mentioned\n",
|
||||
"Total prompt tokens: 1213\n",
|
||||
"Total completion tokens: 112\n",
|
||||
"Duration: 2.75 seconds\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"max_msg_termination = MaxMessageTermination(max_messages=10)\n",
|
||||
"text_termination = TextMentionTermination(\"APPROVE\")\n",
|
||||
"combined_termination = max_msg_termination | text_termination\n",
|
||||
"\n",
|
||||
"round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n",
|
||||
"\n",
|
||||
"# Use asyncio.run(...) if you are running this script as a standalone script.\n",
|
||||
"await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n",
|
||||
"\n",
|
||||
"Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"combined_termination = max_msg_termination & text_termination"
|
||||
]
|
||||
}
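,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The combined AND condition is used exactly like the OR one. A minimal usage sketch, reusing the agents defined earlier:\n",
"\n",
"```python\n",
"round_robin_team = RoundRobinGroupChat(\n",
"    [primary_agent, critic_agent], termination_condition=combined_termination\n",
")\n",
"await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n",
"```"
]
}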
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
"nbformat_minor": 2
|
||||
}
|
||||
|
|
|
@ -23,14 +23,14 @@ from autogen_agentchat import EVENT_LOGGER_NAME
|
|||
from autogen_agentchat.agents import BaseChatAgent
|
||||
from autogen_agentchat.base import Response
|
||||
from autogen_agentchat.messages import (
|
||||
AgentMessage,
|
||||
AgentEvent,
|
||||
ChatMessage,
|
||||
HandoffMessage,
|
||||
MultiModalMessage,
|
||||
StopMessage,
|
||||
TextMessage,
|
||||
ToolCallMessage,
|
||||
ToolCallResultMessage,
|
||||
ToolCallRequestEvent,
|
||||
ToolCallExecutionEvent,
|
||||
)
|
||||
from autogen_core import CancellationToken, FunctionCall
|
||||
from autogen_core.models._types import FunctionExecutionResult
|
||||
|
@ -350,7 +350,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
|
|||
|
||||
async def on_messages_stream(
|
||||
self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
|
||||
) -> AsyncGenerator[AgentMessage | Response, None]:
|
||||
) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
|
||||
"""Handle incoming messages and return a response."""
|
||||
await self._ensure_initialized()
|
||||
|
||||
|
@ -362,7 +362,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
|
|||
await self.handle_text_message(message.content, cancellation_token)
|
||||
|
||||
# Inner messages for tool calls
|
||||
inner_messages: List[AgentMessage] = []
|
||||
inner_messages: List[AgentEvent | ChatMessage] = []
|
||||
|
||||
# Create and start a run
|
||||
run: Run = await cancellation_token.link_future(
|
||||
|
@ -402,7 +402,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
|
|||
)
|
||||
|
||||
# Add tool call message to inner messages
|
||||
tool_call_msg = ToolCallMessage(source=self.name, content=tool_calls)
|
||||
tool_call_msg = ToolCallRequestEvent(source=self.name, content=tool_calls)
|
||||
inner_messages.append(tool_call_msg)
|
||||
event_logger.debug(tool_call_msg)
|
||||
yield tool_call_msg
|
||||
|
@ -414,7 +414,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
|
|||
tool_outputs.append(FunctionExecutionResult(content=result, call_id=tool_call.id))
|
||||
|
||||
# Add tool result message to inner messages
|
||||
tool_result_msg = ToolCallResultMessage(source=self.name, content=tool_outputs)
|
||||
tool_result_msg = ToolCallExecutionEvent(source=self.name, content=tool_outputs)
|
||||
inner_messages.append(tool_result_msg)
|
||||
event_logger.debug(tool_result_msg)
|
||||
yield tool_result_msg
|
||||
|
|
|
@@ -23,7 +23,7 @@ import aiofiles
 import PIL.Image
 from autogen_agentchat.agents import BaseChatAgent
 from autogen_agentchat.base import Response
-from autogen_agentchat.messages import AgentMessage, ChatMessage, MultiModalMessage, TextMessage
+from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage
 from autogen_core import EVENT_LOGGER_NAME, CancellationToken, FunctionCall
 from autogen_core import Image as AGImage
 from autogen_core.models import (
@@ -365,13 +365,13 @@ class MultimodalWebSurfer(BaseChatAgent):
 
     async def on_messages_stream(
         self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
-    ) -> AsyncGenerator[AgentMessage | Response, None]:
+    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
         for chat_message in messages:
             if isinstance(chat_message, TextMessage | MultiModalMessage):
                 self._chat_history.append(UserMessage(content=chat_message.content, source=chat_message.source))
             else:
                 raise ValueError(f"Unexpected message in MultiModalWebSurfer: {chat_message}")
-        self.inner_messages: List[AgentMessage] = []
+        self.inner_messages: List[AgentEvent | ChatMessage] = []
         self.model_usage: List[RequestUsage] = []
         try:
             content = await self._generate_reply(cancellation_token=cancellation_token)
@@ -2,7 +2,7 @@ import time
 from typing import AsyncGenerator, Callable, Optional, Union
 
 from autogen_agentchat.base import TaskResult
-from autogen_agentchat.messages import AgentMessage, ChatMessage
+from autogen_agentchat.messages import AgentEvent, ChatMessage
 from autogen_core import CancellationToken
 
 from .database import Component, ComponentFactory
@@ -27,7 +27,7 @@ class TeamManager:
         team_config: ComponentConfigInput,
         input_func: Optional[Callable] = None,
         cancellation_token: Optional[CancellationToken] = None,
-    ) -> AsyncGenerator[Union[AgentMessage, ChatMessage, TaskResult], None]:
+    ) -> AsyncGenerator[Union[AgentEvent | ChatMessage, ChatMessage, TaskResult], None]:
         """Stream the team's execution results"""
         start_time = time.time()
 
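A hedged sketch of consuming this stream, assuming, as the annotation above suggests, that intermediate items are events or chat messages and the final item is the `TaskResult`; `team.json` is the same config file used in the notebook below:

```python
# Sketch: stream a TeamManager run and separate messages from the result.
from autogen_agentchat.base import TaskResult
from autogenstudio.teammanager import TeamManager


async def main() -> None:
    wm = TeamManager()
    stream = wm.run_stream(task="What is the weather in New York?", team_config="team.json")
    async for item in stream:
        if isinstance(item, TaskResult):
            print("stop reason:", item.stop_reason)  # last item ends the run
        else:
            print(type(item).__name__, "from", getattr(item, "source", "?"))
```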
@@ -6,14 +6,14 @@ from uuid import UUID
 
 from autogen_agentchat.base._task import TaskResult
 from autogen_agentchat.messages import (
-    AgentMessage,
+    AgentEvent,
     ChatMessage,
     HandoffMessage,
     MultiModalMessage,
     StopMessage,
     TextMessage,
-    ToolCallMessage,
-    ToolCallResultMessage,
+    ToolCallRequestEvent,
+    ToolCallExecutionEvent,
 )
 from autogen_core import CancellationToken
 from autogen_core import Image as AGImage
@@ -108,8 +108,8 @@ class WebSocketManager:
                     MultiModalMessage,
                     StopMessage,
                     HandoffMessage,
-                    ToolCallMessage,
-                    ToolCallResultMessage,
+                    ToolCallRequestEvent,
+                    ToolCallExecutionEvent,
                 ),
             ):
                 await self._save_message(run_id, message)
@@ -141,7 +141,7 @@ class WebSocketManager:
         finally:
             self._cancellation_tokens.pop(run_id, None)
 
-    async def _save_message(self, run_id: UUID, message: Union[AgentMessage, ChatMessage]) -> None:
+    async def _save_message(self, run_id: UUID, message: Union[AgentEvent | ChatMessage, ChatMessage]) -> None:
         """Save a message to the database"""
         run = await self._get_run(run_id)
         if run:
@@ -325,7 +325,7 @@ class WebSocketManager:
             }
 
         elif isinstance(
-            message, (TextMessage, StopMessage, HandoffMessage, ToolCallMessage, ToolCallResultMessage)
+            message, (TextMessage, StopMessage, HandoffMessage, ToolCallRequestEvent, ToolCallExecutionEvent)
        ):
             return {"type": "message", "data": message.model_dump()}
 
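Since these message and event classes are pydantic models, the dispatch above reduces to `model_dump()`. A sketch of that branch, with the type subset and dict shape copied from the hunk and the standalone function name assumed for illustration:

```python
# Sketch of the serialization branch shown above (function name hypothetical).
from typing import Any

from autogen_agentchat.messages import (
    HandoffMessage,
    StopMessage,
    TextMessage,
    ToolCallExecutionEvent,
    ToolCallRequestEvent,
)


def format_message(message: Any) -> dict | None:
    # Events and chat messages serialize uniformly via pydantic.
    if isinstance(
        message, (TextMessage, StopMessage, HandoffMessage, ToolCallRequestEvent, ToolCallExecutionEvent)
    ):
        return {"type": "message", "data": message.model_dump()}
    return None
```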
@@ -1,342 +1,342 @@
{
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## AutoGen Studio Agent Workflow API Example\n",
|
||||
"\n",
|
||||
"This notebook focuses on demonstrating capabilities of the autogen studio workflow python api. \n",
|
||||
"\n",
|
||||
"- Declarative Specification of an Agent Team\n",
|
||||
"- Loading the specification and running the resulting agent\n",
|
||||
"\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallRequestEvent(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_jcgtAVlBvTFzVpPxKX88Xsa4', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_jcgtAVlBvTFzVpPxKX88Xsa4')], type='ToolCallExecutionEvent'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The current weather in New York is 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=5.103050947189331\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogenstudio.teammanager import TeamManager\n",
|
||||
"\n",
|
||||
"wm = TeamManager()\n",
|
||||
"result = await wm.run(task=\"What is the weather in New York?\", team_config=\"team.json\")\n",
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"source='user' models_usage=None content='What is the weather in New York?' type='TextMessage'\n",
|
||||
"source='writing_agent' models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15) content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')] type='ToolCallRequestEvent'\n",
|
||||
"source='writing_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')] type='ToolCallExecutionEvent'\n",
|
||||
"source='writing_agent' models_usage=None content='The weather in New York is 73 degrees and Sunny.' type='TextMessage'\n",
|
||||
"source='writing_agent' models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.' type='TextMessage'\n",
|
||||
"task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?', type='TextMessage'), ToolCallRequestEvent(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_EwdwWogp5jDKdB7t9WGCNjZW', arguments='{\"city\":\"New York\"}', name='get_weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_EwdwWogp5jDKdB7t9WGCNjZW')], type='ToolCallExecutionEvent'), TextMessage(source='writing_agent', models_usage=None, content='The weather in New York is 73 degrees and Sunny.', type='TextMessage'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=103, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.', type='TextMessage')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=1.284574270248413\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result_stream = wm.run_stream(task=\"What is the weather in New York?\", team_config=\"team.json\")\n",
|
||||
"async for response in result_stream:\n",
|
||||
" print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## AutoGen Studio Database API\n",
|
||||
"\n",
|
||||
"Api for creating objects and serializing to a database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Response(message='Database is ready', status=True, data=None)"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from autogenstudio.database import DatabaseManager\n",
|
||||
"import os\n",
|
||||
"# delete database\n",
|
||||
"# if os.path.exists(\"test.db\"):\n",
|
||||
"# os.remove(\"test.db\")\n",
|
||||
"\n",
|
||||
"os.makedirs(\"test\", exist_ok=True)\n",
|
||||
"# create a database\n",
|
||||
"dbmanager = DatabaseManager(engine_uri=\"sqlite:///test.db\", base_dir=\"test\")\n",
|
||||
"dbmanager.initialize_database()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"from sqlmodel import Session, text, select\n",
|
||||
"from autogenstudio.datamodel.types import ModelTypes, TeamTypes, AgentTypes, ToolConfig, ToolTypes, OpenAIModelConfig, RoundRobinTeamConfig, MaxMessageTerminationConfig, AssistantAgentConfig, TerminationTypes\n",
|
||||
"\n",
|
||||
"from autogenstudio.datamodel.db import Model, Team, Agent, Tool,LinkTypes\n",
|
||||
"\n",
|
||||
"user_id = \"guestuser@gmail.com\" \n",
|
||||
"\n",
|
||||
"gpt4_model = Model(user_id=user_id, config= OpenAIModelConfig(model=\"gpt-4o-2024-08-06\", model_type=ModelTypes.OPENAI).model_dump() )\n",
|
||||
"\n",
|
||||
"weather_tool = Tool(user_id=user_id, config=ToolConfig(name=\"get_weather\", description=\"Get the weather for a city\", content=\"async def get_weather(city: str) -> str:\\n return f\\\"The weather in {city} is 73 degrees and Sunny.\\\"\",tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n",
|
||||
"\n",
|
||||
"adding_tool = Tool(user_id=user_id, config=ToolConfig(name=\"add\", description=\"Add two numbers\", content=\"async def add(a: int, b: int) -> int:\\n return a + b\", tool_type=ToolTypes.PYTHON_FUNCTION).model_dump() )\n",
|
||||
"\n",
|
||||
"writing_agent = Agent(user_id=user_id,\n",
|
||||
" config=AssistantAgentConfig(\n",
|
||||
" name=\"writing_agent\",\n",
|
||||
" tools=[weather_tool.config],\n",
|
||||
" agent_type=AgentTypes.ASSISTANT,\n",
|
||||
" model_client=gpt4_model.config\n",
|
||||
" ).model_dump()\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"team = Team(user_id=user_id, config=RoundRobinTeamConfig(\n",
|
||||
" name=\"weather_team\",\n",
|
||||
" participants=[writing_agent.config],\n",
|
||||
" termination_condition=MaxMessageTerminationConfig(termination_type=TerminationTypes.MAX_MESSAGES, max_messages=5).model_dump(),\n",
|
||||
" team_type=TeamTypes.ROUND_ROBIN\n",
|
||||
" ).model_dump()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"with Session(dbmanager.engine) as session:\n",
|
||||
" session.add(gpt4_model)\n",
|
||||
" session.add(weather_tool)\n",
|
||||
" session.add(adding_tool)\n",
|
||||
" session.add(writing_agent)\n",
|
||||
" session.add(team)\n",
|
||||
" session.commit()\n",
|
||||
"\n",
|
||||
" dbmanager.link(LinkTypes.AGENT_MODEL, writing_agent.id, gpt4_model.id)\n",
|
||||
" dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, weather_tool.id)\n",
|
||||
" dbmanager.link(LinkTypes.AGENT_TOOL, writing_agent.id, adding_tool.id)\n",
|
||||
" dbmanager.link(LinkTypes.TEAM_AGENT, team.id, writing_agent.id)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2 teams in database\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"all_teams = dbmanager.get(Team)\n",
|
||||
"print(len(all_teams.data), \"teams in database\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configuration Manager\n",
|
||||
"\n",
|
||||
"Helper class to mostly import teams/agents/models/tools etc into a database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogenstudio.database import ConfigurationManager\n",
|
||||
"\n",
|
||||
"config_manager = ConfigurationManager(dbmanager)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"message='Team Created Successfully' status=True data={'id': 4, 'updated_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674916), 'version': '0.0.1', 'created_at': datetime.datetime(2024, 12, 15, 15, 52, 21, 674910), 'user_id': 'user_id', 'config': {'version': '1.0.0', 'component_type': 'team', 'name': 'weather_team', 'participants': [{'version': '1.0.0', 'component_type': 'agent', 'name': 'writing_agent', 'agent_type': 'AssistantAgent', 'description': None, 'model_client': {'version': '1.0.0', 'component_type': 'model', 'model': 'gpt-4o-2024-08-06', 'model_type': 'OpenAIChatCompletionClient', 'api_key': None, 'base_url': None}, 'tools': [{'version': '1.0.0', 'component_type': 'tool', 'name': 'get_weather', 'description': 'Get the weather for a city', 'content': 'async def get_weather(city: str) -> str:\\n return f\"The weather in {city} is 73 degrees and Sunny.\"', 'tool_type': 'PythonFunction'}], 'system_message': None}], 'team_type': 'RoundRobinGroupChat', 'termination_condition': {'version': '1.0.0', 'component_type': 'termination', 'termination_type': 'MaxMessageTermination', 'max_messages': 5}, 'max_turns': None}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = await config_manager.import_component(\"team.json\", user_id=\"user_id\", check_exists=True)\n",
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"message='Directory import complete' status=True data=[{'component': 'team', 'status': True, 'message': 'Team Created Successfully', 'id': 5}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = await config_manager.import_directory(\".\", user_id=\"user_id\", check_exists=False)\n",
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"5 teams in database\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"all_teams = dbmanager.get(Team)\n",
|
||||
"print(len(all_teams.data), \"teams in database\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sample AgentChat Example (Python)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_agentchat.agents import AssistantAgent\n",
|
||||
"from autogen_agentchat.conditions import TextMentionTermination\n",
|
||||
"from autogen_agentchat.teams import RoundRobinGroupChat, SelectorGroupChat\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"planner_agent = AssistantAgent(\n",
|
||||
" \"planner_agent\",\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n",
|
||||
" description=\"A helpful assistant that can plan trips.\",\n",
|
||||
" system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request. Respond with a single sentence\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"local_agent = AssistantAgent(\n",
|
||||
" \"local_agent\",\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n",
|
||||
" description=\"A local assistant that can suggest local activities or places to visit.\",\n",
|
||||
" system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided. Respond with a single sentence\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"language_agent = AssistantAgent(\n",
|
||||
" \"language_agent\",\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n",
|
||||
" description=\"A helpful assistant that can provide language tips for a given destination.\",\n",
|
||||
" system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.Respond with a single sentence\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"travel_summary_agent = AssistantAgent(\n",
|
||||
" \"travel_summary_agent\",\n",
|
||||
" model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n",
|
||||
" description=\"A helpful assistant that can summarize the travel plan.\",\n",
|
||||
" system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed tfinal travel plan. You must ensure th b at the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.Respond with a single sentence\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"termination = TextMentionTermination(\"TERMINATE\")\n",
|
||||
"group_chat = RoundRobinGroupChat(\n",
|
||||
" [planner_agent, local_agent, language_agent, travel_summary_agent], termination_condition=termination\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"source='user' models_usage=None content='Plan a 3 day trip to Nepal.' type='TextMessage'\n",
|
||||
"source='planner_agent' models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n",
|
||||
"source='local_agent' models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53) content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.' type='TextMessage'\n",
|
||||
"source='language_agent' models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42) content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\" type='TextMessage'\n",
|
||||
"source='travel_summary_agent' models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298) content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\" type='TextMessage'\n",
|
||||
"TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Plan a 3 day trip to Nepal.', type='TextMessage'), TextMessage(source='planner_agent', models_usage=RequestUsage(prompt_tokens=45, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='local_agent', models_usage=RequestUsage(prompt_tokens=115, completion_tokens=53), content='I recommend starting your trip in Kathmandu, where you can explore the historic Durbar Square and Pashupatinath Temple, then take a scenic flight over the Everest range, and finish your journey with a stunning hike in the Annapurna region.', type='TextMessage'), TextMessage(source='language_agent', models_usage=RequestUsage(prompt_tokens=199, completion_tokens=42), content=\"For your trip to Nepal, it's crucial to learn some phrases in Nepali since English is not widely spoken outside of major cities and tourist areas; even a simple phrasebook or translation app would be beneficial.\", type='TextMessage'), TextMessage(source='travel_summary_agent', models_usage=RequestUsage(prompt_tokens=265, completion_tokens=298), content=\"Day 1: Begin your journey in Kathmandu, where you can visit the historic Durbar Square, a UNESCO World Heritage site that showcases intricate woodcarving and houses the iconic Kasthamandap Temple. From there, proceed to the sacred Pashupatinath Temple, a significant Hindu pilgrimage site on the banks of the holy Bagmati River.\\n\\nDay 2: Embark on an early morning scenic flight over the Everest range. This one-hour flight provides a breathtaking view of the world's highest peak along with other neighboring peaks. Standard flights depart from Tribhuvan International Airport between 6:30 AM to 7:30 AM depending on the weather. Spend the remainder of the day exploring the local markets in Kathmandu, sampling a variety of Nepalese cuisines and shopping for unique souvenirs.\\n\\nDay 3: Finally, take a short flight or drive to Pokhara, the gateway to the Annapurna region. Embark on a guided hike enjoying the stunning backdrop of the Annapurna ranges and the serene Phewa lake.\\n\\nRemember to bring along a phrasebook or translation app, as English is not widely spoken in Nepal, particularly outside of major cities and tourist hotspots. \\n\\nPack comfortable trekking gear, adequate water, medical and emergency supplies. It's also advisable to check on the weather updates, as conditions can change rapidly, particularly in mountainous areas. Enjoy your Nepal expedition!TERMINATE\", type='TextMessage')], stop_reason=\"Text 'TERMINATE' mentioned\")\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"result = group_chat.run_stream(task=\"Plan a 3 day trip to Nepal.\")\n",
|
||||
"async for response in result:\n",
|
||||
" print(response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "agnext",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|