Mirror of https://github.com/microsoft/autogen.git

Compare commits: f66dda3ee7...0cf749441f (15 commits)

Commits:

- 0cf749441f
- 71b7429a42
- 88dda88f53
- 8e9154d0df
- 646e75ff9f
- e6ccd5baa8
- 7e8472f99b
- 756aef366d
- b1c1b5c6fb
- 71a4eaedf9
- 95b1ed5d81
- c75515990e
- 3500170be1
- a6e3485d0e
- 3425d7dc2c
@@ -90,6 +90,7 @@ body:
       multiple: false
       options:
         - "Python dev (main branch)"
+        - "Python 0.5.2"
        - "Python 0.5.1"
         - "Python 0.4.9"
         - "Python 0.4.8"
@@ -33,7 +33,7 @@ jobs:
         [
           # For main use the workflow target
           { ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13", sphinx-release-override: "dev" },
-          { ref: "python-v0.5.1", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
+          { ref: "python-v0.5.2", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
           { ref: "v0.4.0.post1", dest-dir: "0.4.0", uv-version: "0.5.13", sphinx-release-override: "" },
           { ref: "v0.4.1", dest-dir: "0.4.1", uv-version: "0.5.13", sphinx-release-override: "" },
           { ref: "v0.4.2", dest-dir: "0.4.2", uv-version: "0.5.13", sphinx-release-override: "" },
@@ -45,6 +45,7 @@ jobs:
           { ref: "python-v0.4.8", dest-dir: "0.4.8", uv-version: "0.5.13", sphinx-release-override: "" },
           { ref: "python-v0.4.9-website", dest-dir: "0.4.9", uv-version: "0.5.13", sphinx-release-override: "" },
           { ref: "python-v0.5.1", dest-dir: "0.5.1", uv-version: "0.5.13", sphinx-release-override: "" },
+          { ref: "python-v0.5.2", dest-dir: "0.5.2", uv-version: "0.5.13", sphinx-release-override: "" },
         ]
     steps:
       - name: Checkout
@@ -5,11 +5,16 @@
     "url": "/autogen/dev/"
   },
   {
-    "name": "0.5.1 (stable)",
+    "name": "0.5.2 (stable)",
     "version": "stable",
     "url": "/autogen/stable/",
     "preferred": true
   },
+  {
+    "name": "0.5.1",
+    "version": "0.5.1",
+    "url": "/autogen/0.5.1/"
+  },
   {
     "name": "0.4.9",
     "version": "0.4.9",
@@ -14,7 +14,9 @@ public static class DotnetInteractiveKernelBuilder

    public static InProccessDotnetInteractiveKernelBuilder CreateDefaultInProcessKernelBuilder()
    {
-        return new InProccessDotnetInteractiveKernelBuilder();
+        return new InProccessDotnetInteractiveKernelBuilder()
+            .AddCSharpKernel()
+            .AddFSharpKernel();
    }
 #endif
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "autogen-agentchat"
-version = "0.5.1"
+version = "0.5.2"
 license = {file = "LICENSE-CODE"}
 description = "AutoGen agents and teams library"
 readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    "autogen-core==0.5.1",
+    "autogen-core==0.5.2",
 ]

 [tool.ruff]
@@ -1,45 +1,108 @@
+import logging
 import re
-from typing import List, Sequence
+from typing import (
+    AsyncGenerator,
+    List,
+    Optional,
+    Sequence,
+    Union,
+)

 from autogen_core import CancellationToken, Component, ComponentModel
 from autogen_core.code_executor import CodeBlock, CodeExecutor
+from autogen_core.memory import Memory
+from autogen_core.model_context import (
+    ChatCompletionContext,
+    UnboundedChatCompletionContext,
+)
+from autogen_core.models import (
+    AssistantMessage,
+    ChatCompletionClient,
+    CreateResult,
+    LLMMessage,
+    SystemMessage,
+    UserMessage,
+)
 from pydantic import BaseModel
 from typing_extensions import Self

+from .. import EVENT_LOGGER_NAME
 from ..base import Response
-from ..messages import BaseChatMessage, TextMessage
+from ..messages import (
+    BaseAgentEvent,
+    BaseChatMessage,
+    CodeExecutionEvent,
+    CodeGenerationEvent,
+    HandoffMessage,
+    MemoryQueryEvent,
+    ModelClientStreamingChunkEvent,
+    TextMessage,
+    ThoughtEvent,
+)
+from ..utils import remove_images
 from ._base_chat_agent import BaseChatAgent

+event_logger = logging.getLogger(EVENT_LOGGER_NAME)
+

 class CodeExecutorAgentConfig(BaseModel):
     """Configuration for CodeExecutorAgent"""

     name: str
     code_executor: ComponentModel
-    description: str = "A computer terminal that performs no other action than running Python scripts (provided to it quoted in ```python code blocks), or sh shell scripts (provided to it quoted in ```sh code blocks)."
+    model_client: ComponentModel | None = None
+    description: str | None = None
     sources: List[str] | None = None
+    system_message: str | None = None
+    model_client_stream: bool = False
+    model_context: ComponentModel | None = None


 class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]):
-    """An agent that extracts and executes code snippets found in received
-    :class:`~autogen_agentchat.messages.TextMessage` messages and returns the output
-    of the code execution.
-
-    It is typically used within a team with another agent that generates code snippets to be executed.
+    """(Experimental) An agent that generates and executes code snippets based on user instructions.

     .. note::

-        Consider :class:`~autogen_ext.tools.code_execution.PythonCodeExecutionTool`
-        as an alternative to this agent. The tool allows for executing Python code
-        within a single agent, rather than sending it to a separate agent for execution.
-        However, the model for the agent will have to generate properly escaped code
-        string as a parameter to the tool.
+        This agent is experimental and may change in future releases.
+
+    It is typically used within a team with another agent that generates code snippets
+    to be executed, or alone with a `model_client` so that it can generate code
+    based on the user query, execute it, and reflect on the result.
+
+    When used with `model_client`, it will generate code snippets using the model
+    and execute them using the provided `code_executor`. The model will also reflect on the
+    code execution results. The agent will yield the final reflection result from the model
+    as the final response.
+
+    When used without `model_client`, it will only execute code blocks found in
+    :class:`~autogen_agentchat.messages.TextMessage` messages and return the output
+    of the code execution.
+
+    .. note::
+
+        Using :class:`~autogen_agentchat.agents.AssistantAgent` with
+        :class:`~autogen_ext.tools.code_execution.PythonCodeExecutionTool`
+        is an alternative to this agent. However, the model for that agent will
+        have to generate a properly escaped code string as a parameter to the tool.

     Args:
-        name: The name of the agent.
-        code_executor: The CodeExecutor responsible for executing code received in messages (:py:class:`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` recommended. See example below)
-        description (optional): The description of the agent.
-        sources (optional): Check only messages from the specified agents for the code to execute.
+        name (str): The name of the agent.
+        code_executor (CodeExecutor): The code executor responsible for executing code received in messages
+            (:py:class:`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` recommended. See example below)
+        model_client (ChatCompletionClient, optional): The model client to use for inference and generating code.
+            If not provided, the agent will only execute code blocks found in input messages.
+        model_client_stream (bool, optional): If `True`, the model client will be used in streaming mode.
+            :meth:`on_messages_stream` and :meth:`BaseChatAgent.run_stream` methods will
+            also yield :class:`~autogen_agentchat.messages.ModelClientStreamingChunkEvent`
+            messages as the model client produces chunks of response. Defaults to `False`.
+        description (str, optional): The description of the agent. If not provided,
+            :class:`~autogen_agentchat.agents.CodeExecutorAgent.DEFAULT_AGENT_DESCRIPTION` will be used.
+        system_message (str, optional): The system message for the model. If provided, it will be prepended to the messages in the model context when making an inference. Set to `None` to disable.
+            Defaults to :class:`~autogen_agentchat.agents.CodeExecutorAgent.DEFAULT_SYSTEM_MESSAGE`. This is only used if `model_client` is provided.
+        sources (Sequence[str], optional): Check only messages from the specified agents for the code to execute.
+            This is useful when the agent is part of a group chat and you want to limit the code execution to messages from specific agents.
+            If not provided, all messages will be checked for code blocks.
+            This is only used if `model_client` is not provided.

     .. note::
@@ -101,8 +164,126 @@ class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]):

         asyncio.run(run_code_executor_agent())

+    In the following example, we show how to set up a `CodeExecutorAgent` without the
+    `model_client` parameter to execute code blocks generated by other agents in a group
+    chat, using :py:class:`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`:
+
+    .. code-block:: python
+
+        import asyncio
+
+        from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
+        from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+        from autogen_agentchat.agents import AssistantAgent, CodeExecutorAgent
+        from autogen_agentchat.conditions import MaxMessageTermination
+        from autogen_agentchat.teams import RoundRobinGroupChat
+        from autogen_agentchat.ui import Console
+
+        termination_condition = MaxMessageTermination(3)
+
+
+        async def main() -> None:
+            model_client = OpenAIChatCompletionClient(model="gpt-4o")
+
+            # define the Docker CLI Code Executor
+            code_executor = DockerCommandLineCodeExecutor(work_dir="coding")
+
+            # start the execution container
+            await code_executor.start()
+
+            code_executor_agent = CodeExecutorAgent("code_executor_agent", code_executor=code_executor)
+            coder_agent = AssistantAgent("coder_agent", model_client=model_client)
+
+            groupchat = RoundRobinGroupChat(
+                participants=[coder_agent, code_executor_agent], termination_condition=termination_condition
+            )
+
+            task = "Write python code to print Hello World!"
+            await Console(groupchat.run_stream(task=task))
+
+            # stop the execution container
+            await code_executor.stop()
+
+
+        asyncio.run(main())
+
+    .. code-block:: text
+
+        ---------- user ----------
+        Write python code to print Hello World!
+        ---------- coder_agent ----------
+        Certainly! Here's a simple Python code to print "Hello World!":
+
+        ```python
+        print("Hello World!")
+        ```
+
+        You can run this code in any Python environment to display the message.
+        ---------- code_executor_agent ----------
+        Hello World!
+
+    In the following example, we show how to set up a `CodeExecutorAgent` with a
+    `model_client` so that it can generate its own code, without the help of any other
+    agent, and execute it in :py:class:`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`:
+
+    .. code-block:: python
+
+        import asyncio
+
+        from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
+        from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+        from autogen_agentchat.agents import CodeExecutorAgent
+        from autogen_agentchat.conditions import TextMessageTermination
+        from autogen_agentchat.ui import Console
+
+        termination_condition = TextMessageTermination("code_executor_agent")
+
+
+        async def main() -> None:
+            model_client = OpenAIChatCompletionClient(model="gpt-4o")
+
+            # define the Docker CLI Code Executor
+            code_executor = DockerCommandLineCodeExecutor(work_dir="coding")
+
+            # start the execution container
+            await code_executor.start()
+
+            code_executor_agent = CodeExecutorAgent(
+                "code_executor_agent", code_executor=code_executor, model_client=model_client
+            )
+
+            task = "Write python code to print Hello World!"
+            await Console(code_executor_agent.run_stream(task=task))
+
+            # stop the execution container
+            await code_executor.stop()
+
+
+        asyncio.run(main())
+
+    .. code-block:: text
+
+        ---------- user ----------
+        Write python code to print Hello World!
+        ---------- code_executor_agent ----------
+        Certainly! Here is a simple Python code to print "Hello World!" to the console:
+
+        ```python
+        print("Hello World!")
+        ```
+
+        Let's execute it to confirm the output.
+        ---------- code_executor_agent ----------
+        Hello World!
+
+        ---------- code_executor_agent ----------
+        The code has been executed successfully, and it printed "Hello World!" as expected. If you have any more requests or questions, feel free to ask!
+
     """

+    DEFAULT_TERMINAL_DESCRIPTION = "A computer terminal that performs no other action than running Python scripts (provided to it quoted in ```python code blocks), or sh shell scripts (provided to it quoted in ```sh code blocks)."
+    DEFAULT_AGENT_DESCRIPTION = "A Code Execution Agent that generates and executes Python and shell scripts based on user instructions. Python code should be provided in ```python code blocks, and sh shell scripts should be provided in ```sh code blocks for execution. It ensures correctness, efficiency, and minimal errors while gracefully handling edge cases."
+    DEFAULT_SYSTEM_MESSAGE = "You are a Code Execution Agent. Your role is to generate and execute Python code based on user instructions, ensuring correctness, efficiency, and minimal errors. Handle edge cases gracefully."
+    NO_CODE_BLOCKS_FOUND_MESSAGE = "No code blocks found in the thread. Please provide at least one markdown-encoded code block to execute (i.e., quoting code in ```python or ```sh code blocks)."
+
     component_config_schema = CodeExecutorAgentConfig
     component_provider_override = "autogen_agentchat.agents.CodeExecutorAgent"
@@ -111,12 +292,38 @@ class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]):
         name: str,
         code_executor: CodeExecutor,
         *,
-        description: str = "A computer terminal that performs no other action than running Python scripts (provided to it quoted in ```python code blocks), or sh shell scripts (provided to it quoted in ```sh code blocks).",
+        model_client: ChatCompletionClient | None = None,
+        model_context: ChatCompletionContext | None = None,
+        model_client_stream: bool = False,
+        description: str | None = None,
+        system_message: str | None = DEFAULT_SYSTEM_MESSAGE,
         sources: Sequence[str] | None = None,
     ) -> None:
+        if description is None:
+            if model_client is None:
+                description = CodeExecutorAgent.DEFAULT_TERMINAL_DESCRIPTION
+            else:
+                description = CodeExecutorAgent.DEFAULT_AGENT_DESCRIPTION
+
         super().__init__(name=name, description=description)
         self._code_executor = code_executor
         self._sources = sources
+        self._model_client_stream = model_client_stream
+
+        self._model_client = None
+        if model_client is not None:
+            self._model_client = model_client
+
+        if model_context is not None:
+            self._model_context = model_context
+        else:
+            self._model_context = UnboundedChatCompletionContext()
+
+        self._system_messages: List[SystemMessage] = []
+        if system_message is None:
+            self._system_messages = []
+        else:
+            self._system_messages = [SystemMessage(content=system_message)]

     @property
     def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
@@ -124,32 +331,159 @@ class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]):
         return (TextMessage,)

     async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
-        # Extract code blocks from the messages.
-        code_blocks: List[CodeBlock] = []
-        for msg in messages:
-            if isinstance(msg, TextMessage):
-                if self._sources is None or msg.source in self._sources:
-                    code_blocks.extend(self._extract_markdown_code_blocks(msg.content))
-        if code_blocks:
-            # Execute the code blocks.
-            result = await self._code_executor.execute_code_blocks(code_blocks, cancellation_token=cancellation_token)
-
-            code_output = result.output
-            if code_output.strip() == "":
-                # No output
-                code_output = f"The script ran but produced no output to console. The POSIX exit code was: {result.exit_code}. If you were expecting output, consider revising the script to ensure content is printed to stdout."
-            elif result.exit_code != 0:
-                # Error
-                code_output = f"The script ran, then exited with an error (POSIX exit code: {result.exit_code})\nIts output was:\n{result.output}"
-
-            return Response(chat_message=TextMessage(content=code_output, source=self.name))
-        else:
-            return Response(
-                chat_message=TextMessage(
-                    content="No code blocks found in the thread. Please provide at least one markdown-encoded code block to execute (i.e., quoting code in ```python or ```sh code blocks).",
-                    source=self.name,
-                )
-            )
+        async for message in self.on_messages_stream(messages, cancellation_token):
+            if isinstance(message, Response):
+                return message
+        raise AssertionError("The stream should have returned the final result.")
+
+    async def on_messages_stream(
+        self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
+    ) -> AsyncGenerator[BaseAgentEvent | BaseChatMessage | Response, None]:
+        """
+        Process the incoming messages with the assistant agent and yield events/responses as they happen.
+        """
+
+        # Gather all relevant state here
+        agent_name = self.name
+        model_context = self._model_context
+        system_messages = self._system_messages
+        model_client = self._model_client
+        model_client_stream = self._model_client_stream
+
+        execution_result: CodeExecutionEvent | None = None
+        if model_client is None:  # default behaviour for backward compatibility
+            # execute generated code if present
+            code_blocks: List[CodeBlock] = await self.extract_code_blocks_from_messages(messages)
+            if not code_blocks:
+                yield Response(
+                    chat_message=TextMessage(
+                        content=self.NO_CODE_BLOCKS_FOUND_MESSAGE,
+                        source=agent_name,
+                    )
+                )
+                return
+            execution_result = await self.execute_code_block(code_blocks, cancellation_token)
+            yield Response(chat_message=TextMessage(content=execution_result.to_text(), source=execution_result.source))
+            return
+
+        # STEP 1: Add new user/handoff messages to the model context
+        await self._add_messages_to_context(
+            model_context=model_context,
+            messages=messages,
+        )
+
+        # STEP 2: Update model context with any relevant memory
+        inner_messages: List[BaseAgentEvent | BaseChatMessage] = []
+        for event_msg in await self._update_model_context_with_memory(
+            memory=None,
+            model_context=model_context,
+            agent_name=agent_name,
+        ):
+            inner_messages.append(event_msg)
+            yield event_msg
+
+        # STEP 3: Run the first inference
+        model_result = None
+        async for inference_output in self._call_llm(
+            model_client=model_client,
+            model_client_stream=model_client_stream,
+            system_messages=system_messages,
+            model_context=model_context,
+            agent_name=agent_name,
+            cancellation_token=cancellation_token,
+        ):
+            if isinstance(inference_output, CreateResult):
+                model_result = inference_output
+            else:
+                # Streaming chunk event
+                yield inference_output
+
+        assert model_result is not None, "No model result was produced."
+
+        # --- NEW: If the model produced a hidden "thought," yield it as an event ---
+        if model_result.thought:
+            thought_event = ThoughtEvent(content=model_result.thought, source=agent_name)
+            yield thought_event
+            inner_messages.append(thought_event)
+
+        # Add the assistant message to the model context (including thought if present)
+        await model_context.add_message(
+            AssistantMessage(
+                content=model_result.content,
+                source=agent_name,
+                thought=getattr(model_result, "thought", None),
+            )
+        )
+
+        code_blocks = self._extract_markdown_code_blocks(str(model_result.content))
+
+        if not code_blocks:
+            yield Response(
+                chat_message=TextMessage(
+                    content=str(model_result.content),
+                    source=agent_name,
+                )
+            )
+            return
+
+        # NOTE: error: Argument of type "str | List[FunctionCall]" cannot be assigned to parameter "content" of type "str" in function "__init__".
+        # For now we can assume that there are no FunctionCalls in the response because we are not providing tools to the CodeExecutorAgent.
+        # So, for now we cast model_result.content to string
+        inferred_text_message: CodeGenerationEvent = CodeGenerationEvent(
+            content=str(model_result.content),
+            code_blocks=code_blocks,
+            source=agent_name,
+        )
+
+        yield inferred_text_message
+
+        execution_result = await self.execute_code_block(inferred_text_message.code_blocks, cancellation_token)
+
+        # Add the code execution result to the model context
+        await model_context.add_message(
+            UserMessage(
+                content=execution_result.result.output,
+                source=agent_name,
+            )
+        )
+
+        yield execution_result
+
+        # always reflect on the execution result
+        async for reflection_response in CodeExecutorAgent._reflect_on_code_block_results_flow(
+            system_messages=system_messages,
+            model_client=model_client,
+            model_client_stream=model_client_stream,
+            model_context=model_context,
+            agent_name=agent_name,
+            inner_messages=inner_messages,
+        ):
+            yield reflection_response  # last reflection_response is of type Response so it will finish the routine
+
+    async def extract_code_blocks_from_messages(self, messages: Sequence[BaseChatMessage]) -> List[CodeBlock]:
+        # Extract code blocks from the messages.
+        code_blocks: List[CodeBlock] = []
+        for msg in messages:
+            if self._sources is None or msg.source in self._sources:
+                if isinstance(msg, TextMessage):
+                    code_blocks.extend(self._extract_markdown_code_blocks(msg.content))
+                # TODO: handle other message types if needed
+        return code_blocks
+
+    async def execute_code_block(
+        self, code_blocks: List[CodeBlock], cancellation_token: CancellationToken
+    ) -> CodeExecutionEvent:
+        # Execute the code blocks.
+        result = await self._code_executor.execute_code_blocks(code_blocks, cancellation_token=cancellation_token)
+
+        if result.output.strip() == "":
+            # No output
+            result.output = f"The script ran but produced no output to console. The POSIX exit code was: {result.exit_code}. If you were expecting output, consider revising the script to ensure content is printed to stdout."
+        elif result.exit_code != 0:
+            # Error
+            result.output = f"The script ran, then exited with an error (POSIX exit code: {result.exit_code})\nIts output was:\n{result.output}"
+
+        return CodeExecutionEvent(result=result, source=self.name)

     async def on_reset(self, cancellation_token: CancellationToken) -> None:
         """It's a no-op as the code executor agent has no mutable state."""
@@ -168,16 +502,164 @@ class CodeExecutorAgent(BaseChatAgent, Component[CodeExecutorAgentConfig]):
     def _to_config(self) -> CodeExecutorAgentConfig:
         return CodeExecutorAgentConfig(
             name=self.name,
+            model_client=(self._model_client.dump_component() if self._model_client is not None else None),
             code_executor=self._code_executor.dump_component(),
             description=self.description,
             sources=list(self._sources) if self._sources is not None else None,
+            system_message=(
+                self._system_messages[0].content
+                if self._system_messages and isinstance(self._system_messages[0].content, str)
+                else None
+            ),
+            model_client_stream=self._model_client_stream,
+            model_context=self._model_context.dump_component(),
         )

     @classmethod
     def _from_config(cls, config: CodeExecutorAgentConfig) -> Self:
         return cls(
             name=config.name,
+            model_client=(
+                ChatCompletionClient.load_component(config.model_client) if config.model_client is not None else None
+            ),
             code_executor=CodeExecutor.load_component(config.code_executor),
             description=config.description,
             sources=config.sources,
+            system_message=config.system_message,
+            model_client_stream=config.model_client_stream,
+            model_context=None,
         )
+
+    @staticmethod
+    def _get_compatible_context(model_client: ChatCompletionClient, messages: List[LLMMessage]) -> Sequence[LLMMessage]:
+        """Ensure that the messages are compatible with the underlying client, by removing images if needed."""
+        if model_client.model_info["vision"]:
+            return messages
+        else:
+            return remove_images(messages)
+
+    @classmethod
+    async def _call_llm(
+        cls,
+        model_client: ChatCompletionClient,
+        model_client_stream: bool,
+        system_messages: List[SystemMessage],
+        model_context: ChatCompletionContext,
+        agent_name: str,
+        cancellation_token: CancellationToken,
+    ) -> AsyncGenerator[Union[CreateResult, ModelClientStreamingChunkEvent], None]:
+        """
+        Perform a model inference and yield either streaming chunk events or the final CreateResult.
+        """
+        all_messages = await model_context.get_messages()
+        llm_messages = cls._get_compatible_context(model_client=model_client, messages=system_messages + all_messages)
+
+        if model_client_stream:
+            model_result: Optional[CreateResult] = None
+            async for chunk in model_client.create_stream(
+                llm_messages, tools=[], cancellation_token=cancellation_token
+            ):
+                if isinstance(chunk, CreateResult):
+                    model_result = chunk
+                elif isinstance(chunk, str):
+                    yield ModelClientStreamingChunkEvent(content=chunk, source=agent_name)
+                else:
+                    raise RuntimeError(f"Invalid chunk type: {type(chunk)}")
+            if model_result is None:
+                raise RuntimeError("No final model result in streaming mode.")
+            yield model_result
+        else:
+            model_result = await model_client.create(llm_messages, tools=[], cancellation_token=cancellation_token)
+            yield model_result
+
+    @staticmethod
+    async def _update_model_context_with_memory(
+        memory: Optional[Sequence[Memory]],
+        model_context: ChatCompletionContext,
+        agent_name: str,
+    ) -> List[MemoryQueryEvent]:
+        """
+        If memory modules are present, update the model context and return the events produced.
+        """
+        events: List[MemoryQueryEvent] = []
+        if memory:
+            for mem in memory:
+                update_context_result = await mem.update_context(model_context)
+                if update_context_result and len(update_context_result.memories.results) > 0:
+                    memory_query_event_msg = MemoryQueryEvent(
+                        content=update_context_result.memories.results,
+                        source=agent_name,
+                    )
+                    events.append(memory_query_event_msg)
+        return events
+
+    @staticmethod
+    async def _add_messages_to_context(
+        model_context: ChatCompletionContext,
+        messages: Sequence[BaseChatMessage],
+    ) -> None:
+        """
+        Add incoming messages to the model context.
+        """
+        for msg in messages:
+            if isinstance(msg, HandoffMessage):
+                for llm_msg in msg.context:
+                    await model_context.add_message(llm_msg)
+            await model_context.add_message(msg.to_model_message())
+
+    @classmethod
+    async def _reflect_on_code_block_results_flow(
+        cls,
+        system_messages: List[SystemMessage],
+        model_client: ChatCompletionClient,
+        model_client_stream: bool,
+        model_context: ChatCompletionContext,
+        agent_name: str,
+        inner_messages: List[BaseAgentEvent | BaseChatMessage],
+    ) -> AsyncGenerator[Response | ModelClientStreamingChunkEvent | ThoughtEvent, None]:
+        """
+        If reflect_on_code_block_results=True, we do another inference based on tool results
+        and yield the final text response (or streaming chunks).
+        """
+        all_messages = system_messages + await model_context.get_messages()
+        llm_messages = cls._get_compatible_context(model_client=model_client, messages=all_messages)
+
+        reflection_result: Optional[CreateResult] = None
+
+        if model_client_stream:
+            async for chunk in model_client.create_stream(llm_messages):
+                if isinstance(chunk, CreateResult):
+                    reflection_result = chunk
+                elif isinstance(chunk, str):
+                    yield ModelClientStreamingChunkEvent(content=chunk, source=agent_name)
+                else:
+                    raise RuntimeError(f"Invalid chunk type: {type(chunk)}")
+        else:
+            reflection_result = await model_client.create(llm_messages)
+
+        if not reflection_result or not isinstance(reflection_result.content, str):
+            raise RuntimeError("Reflect on tool use produced no valid text response.")
+
+        # --- NEW: If the reflection produced a thought, yield it ---
+        if reflection_result.thought:
+            thought_event = ThoughtEvent(content=reflection_result.thought, source=agent_name)
+            yield thought_event
+            inner_messages.append(thought_event)
+
+        # Add to context (including thought if present)
+        await model_context.add_message(
+            AssistantMessage(
+                content=reflection_result.content,
+                source=agent_name,
+                thought=getattr(reflection_result, "thought", None),
+            )
+        )
+
+        yield Response(
+            chat_message=TextMessage(
+                content=reflection_result.content,
+                source=agent_name,
+                models_usage=reflection_result.usage,
+            ),
+            inner_messages=inner_messages,
+        )
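With `_to_config`/`_from_config` now carrying `model_client`, `system_message`, `model_client_stream`, and `model_context`, a configured agent should survive a dump/load round trip. A hedged sketch (it mirrors the serialization test added later in this compare):

```python
# Sketch: config round-trip including the new fields (see the test added below).
from autogen_agentchat.agents import CodeExecutorAgent
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
from autogen_ext.models.replay import ReplayChatCompletionClient

agent = CodeExecutorAgent(
    name="code_executor_agent",
    code_executor=LocalCommandLineCodeExecutor(),
    model_client=ReplayChatCompletionClient(["ok"]),
)
config = agent.dump_component()                      # now serializes model_client as well
restored = CodeExecutorAgent.load_component(config)  # rebuilds an equivalent agent
```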
@@ -8,8 +8,14 @@ from abc import ABC, abstractmethod
 from typing import Any, Dict, Generic, List, Literal, Mapping, TypeVar

 from autogen_core import FunctionCall, Image
+from autogen_core.code_executor import CodeBlock, CodeResult
 from autogen_core.memory import MemoryContent
-from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage, UserMessage
+from autogen_core.models import (
+    FunctionExecutionResult,
+    LLMMessage,
+    RequestUsage,
+    UserMessage,
+)
 from pydantic import BaseModel, Field, computed_field
 from typing_extensions import Annotated, Self

@@ -96,7 +102,8 @@ class BaseChatMessage(BaseMessage, ABC):
     @abstractmethod
     def to_model_message(self) -> UserMessage:
         """Convert the message content to a :class:`~autogen_core.models.UserMessage`
-        for use with model client, e.g., :class:`~autogen_core.models.ChatCompletionClient`."""
+        for use with model client, e.g., :class:`~autogen_core.models.ChatCompletionClient`.
+        """
         ...

@@ -282,6 +289,28 @@ class ToolCallRequestEvent(BaseAgentEvent):
         return str(self.content)


+class CodeGenerationEvent(BaseAgentEvent):
+    """An event signaling code generation for execution."""
+
+    content: str
+    "The complete content as string."
+
+    type: Literal["CodeGenerationEvent"] = "CodeGenerationEvent"
+
+    code_blocks: List[CodeBlock]
+
+    def to_text(self) -> str:
+        return self.content
+
+
+class CodeExecutionEvent(BaseAgentEvent):
+    type: Literal["CodeExecutionEvent"] = "CodeExecutionEvent"
+    result: CodeResult
+
+    def to_text(self) -> str:
+        return self.result.output
+
+
 class ToolCallExecutionEvent(BaseAgentEvent):
     """An event signaling the execution of tool calls."""

@@ -369,6 +398,8 @@ class MessageFactory:
         self._message_types[UserInputRequestedEvent.__name__] = UserInputRequestedEvent
         self._message_types[ModelClientStreamingChunkEvent.__name__] = ModelClientStreamingChunkEvent
         self._message_types[ThoughtEvent.__name__] = ThoughtEvent
+        self._message_types[CodeGenerationEvent.__name__] = CodeGenerationEvent
+        self._message_types[CodeExecutionEvent.__name__] = CodeExecutionEvent

     def is_registered(self, message_type: type[BaseAgentEvent | BaseChatMessage]) -> bool:
         """Check if a message type is registered with the factory."""

@@ -409,7 +440,8 @@ class MessageFactory:


 ChatMessage = Annotated[
-    TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type")
+    TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage,
+    Field(discriminator="type"),
 ]
 """The union type of all built-in concrete subclasses of :class:`BaseChatMessage`.
 It does not include :class:`StructuredMessage` types."""

@@ -420,7 +452,9 @@ AgentEvent = Annotated[
     | MemoryQueryEvent
     | UserInputRequestedEvent
     | ModelClientStreamingChunkEvent
-    | ThoughtEvent,
+    | ThoughtEvent
+    | CodeGenerationEvent
+    | CodeExecutionEvent,
     Field(discriminator="type"),
 ]
 """The union type of all built-in concrete subclasses of :class:`BaseAgentEvent`."""

@@ -446,4 +480,6 @@ __all__ = [
     "ModelClientStreamingChunkEvent",
     "ThoughtEvent",
     "MessageFactory",
+    "CodeGenerationEvent",
+    "CodeExecutionEvent",
 ]
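For orientation, a hedged sketch of how a consumer might branch on the two event types registered above (attribute names come from the class definitions in this hunk; the stream shape follows `on_messages_stream` in the agent diff):

```python
# Sketch: consuming the new events from an agent's message stream.
from autogen_agentchat.base import Response
from autogen_agentchat.messages import CodeExecutionEvent, CodeGenerationEvent


async def consume(stream):  # e.g., agent.on_messages_stream(messages, cancellation_token)
    async for item in stream:
        if isinstance(item, CodeGenerationEvent):
            print("generated:", [block.language for block in item.code_blocks])
        elif isinstance(item, CodeExecutionEvent):
            print("output:", item.to_text())
        elif isinstance(item, Response):
            print("final:", item.chat_message.to_text())
```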
@@ -1,9 +1,14 @@
 import pytest
 from autogen_agentchat.agents import CodeExecutorAgent
 from autogen_agentchat.base import Response
-from autogen_agentchat.messages import TextMessage
+from autogen_agentchat.messages import (
+    CodeExecutionEvent,
+    CodeGenerationEvent,
+    TextMessage,
+)
 from autogen_core import CancellationToken
 from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
+from autogen_ext.models.replay import ReplayChatCompletionClient


 @pytest.mark.asyncio
@@ -34,6 +39,98 @@ print("%0.3f" % (square_root,))
     assert response.chat_message.source == "code_executor"


+@pytest.mark.asyncio
+async def test_code_generation_and_execution_with_model_client() -> None:
+    """
+    Tests the code generation, execution and reflection pipeline using a model client.
+    """
+
+    language = "python"
+    code = 'import math\n\nnumber = 42\nsquare_root = math.sqrt(number)\nprint("%0.3f" % (square_root,))'
+
+    model_client = ReplayChatCompletionClient(
+        [f"Here is the code to calculate the square root of 42:\n```{language}\n{code}```".strip(), "TERMINATE"]
+    )
+
+    agent = CodeExecutorAgent(
+        name="code_executor_agent", code_executor=LocalCommandLineCodeExecutor(), model_client=model_client
+    )
+
+    messages = [
+        TextMessage(
+            content="Generate python code to calculate the square root of 42",
+            source="assistant",
+        )
+    ]
+
+    code_generation_event: CodeGenerationEvent | None = None
+    code_execution_event: CodeExecutionEvent | None = None
+    response: Response | None = None
+
+    async for message in agent.on_messages_stream(messages, CancellationToken()):
+        if isinstance(message, CodeGenerationEvent):
+            code_block = message.code_blocks[0]
+            assert code_block.code == code, "Code block does not match"
+            assert code_block.language == language, "Language does not match"
+            code_generation_event = message
+        elif isinstance(message, CodeExecutionEvent):
+            assert message.to_text().strip() == "6.481", f"Expected '6.481', got: {message.to_text().strip()}"
+            code_execution_event = message
+        elif isinstance(message, Response):
+            assert isinstance(
+                message.chat_message, TextMessage
+            ), f"Expected TextMessage, got: {type(message.chat_message)}"
+            assert (
+                message.chat_message.source == "code_executor_agent"
+            ), f"Expected source 'code_executor_agent', got: {message.chat_message.source}"
+            response = message
+        else:
+            raise AssertionError(f"Unexpected message type: {type(message)}")
+
+    assert code_generation_event is not None, "Code generation event was not received"
+    assert code_execution_event is not None, "Code execution event was not received"
+    assert response is not None, "Response was not received"
+
+
+@pytest.mark.asyncio
+async def test_no_code_response_with_model_client() -> None:
+    """
+    Tests agent behavior when the model client responds with non-code content.
+    """
+
+    model_client = ReplayChatCompletionClient(["The capital of France is Paris.", "TERMINATE"])
+
+    agent = CodeExecutorAgent(
+        name="code_executor_agent", code_executor=LocalCommandLineCodeExecutor(), model_client=model_client
+    )
+
+    messages = [
+        TextMessage(
+            content="What is the capital of France?",
+            source="assistant",
+        )
+    ]
+
+    response: Response | None = None
+
+    async for message in agent.on_messages_stream(messages, CancellationToken()):
+        if isinstance(message, Response):
+            assert isinstance(
+                message.chat_message, TextMessage
+            ), f"Expected TextMessage, got: {type(message.chat_message)}"
+            assert (
+                message.chat_message.source == "code_executor_agent"
+            ), f"Expected source 'code_executor_agent', got: {message.chat_message.source}"
+            assert (
+                message.chat_message.content.strip() == "The capital of France is Paris."
+            ), f"Expected 'The capital of France is Paris.', got: {message.chat_message.content.strip()}"
+            response = message
+        else:
+            raise AssertionError(f"Unexpected message type: {type(message)}")
+
+    assert response is not None, "Response was not received"
+
+
 @pytest.mark.asyncio
 async def test_code_execution_error() -> None:
     """Test basic code execution"""

@@ -178,3 +275,22 @@ async def test_code_execution_agent_serialization() -> None:

     assert isinstance(deserialized_agent, CodeExecutorAgent)
     assert deserialized_agent.name == "code_executor"
+
+
+@pytest.mark.asyncio
+async def test_code_execution_agent_serialization_with_model_client() -> None:
+    """Test agent config serialization"""
+
+    model_client = ReplayChatCompletionClient(["The capital of France is Paris.", "TERMINATE"])
+
+    agent = CodeExecutorAgent(
+        name="code_executor_agent", code_executor=LocalCommandLineCodeExecutor(), model_client=model_client
+    )
+
+    # Serialize and deserialize the agent
+    serialized_agent = agent.dump_component()
+    deserialized_agent = CodeExecutorAgent.load_component(serialized_agent)
+
+    assert isinstance(deserialized_agent, CodeExecutorAgent)
+    assert deserialized_agent.name == "code_executor_agent"
+    assert deserialized_agent._model_client is not None  # type: ignore
@@ -291,7 +291,7 @@
     "A --- B\n",
     "|     |\n",
     "|     |\n",
-    "C --- D\n",
+    "D --- C\n",
     "```\n",
     "\n",
     "Each solver agent is connected to two other solver agents. \n",
@@ -158,7 +158,7 @@
 "source": [
     "from autogen_core import AgentId, SingleThreadedAgentRuntime\n",
     "\n",
-    "# Create an local embedded runtime.\n",
+    "# Create a local embedded runtime.\n",
     "runtime = SingleThreadedAgentRuntime()\n",
     "\n",
     "# Register the modifier and checker agents by providing\n",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "autogen-core"
-version = "0.5.1"
+version = "0.5.2"
 license = {file = "LICENSE-CODE"}
 description = "Foundational interfaces and agent runtime implementation for AutoGen"
 readme = "README.md"
@@ -69,7 +69,7 @@ dev = [
     "pygments",
     "sphinxext-rediraffe",

-    "autogen_ext==0.5.1",
+    "autogen_ext==0.5.2",

     # Documentation tooling
     "diskcache",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "autogen-ext"
-version = "0.5.1"
+version = "0.5.2"
 license = {file = "LICENSE-CODE"}
 description = "AutoGen extensions library"
 readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    "autogen-core==0.5.1",
+    "autogen-core==0.5.2",
 ]

 [project.optional-dependencies]
@@ -31,7 +31,7 @@ docker = ["docker~=7.0", "asyncio_atexit>=1.0.1"]
 ollama = ["ollama>=0.4.7", "tiktoken>=0.8.0"]
 openai = ["openai>=1.66.5", "tiktoken>=0.8.0", "aiofiles"]
 file-surfer = [
-    "autogen-agentchat==0.5.1",
+    "autogen-agentchat==0.5.2",
     "magika>=0.6.1rc2",
     "markitdown[all]~=0.1.0a3",
 ]
@@ -43,21 +43,21 @@ llama-cpp = [
 graphrag = ["graphrag>=1.0.1"]
 chromadb = ["chromadb>=1.0.0"]
 web-surfer = [
-    "autogen-agentchat==0.5.1",
+    "autogen-agentchat==0.5.2",
     "playwright>=1.48.0",
     "pillow>=11.0.0",
     "magika>=0.6.1rc2",
     "markitdown[all]~=0.1.0a3",
 ]
 magentic-one = [
-    "autogen-agentchat==0.5.1",
+    "autogen-agentchat==0.5.2",
     "magika>=0.6.1rc2",
     "markitdown[all]~=0.1.0a3",
     "playwright>=1.48.0",
     "pillow>=11.0.0",
 ]
 video-surfer = [
-    "autogen-agentchat==0.5.1",
+    "autogen-agentchat==0.5.2",
     "opencv-python>=4.5",
     "ffmpeg-python",
     "openai-whisper",
@@ -137,7 +137,7 @@ rich = ["rich>=13.9.4"]

 mcp = [
     "mcp>=1.6.0",
-    "json-schema-to-pydantic>=0.2.3"
+    "json-schema-to-pydantic>=0.2.4"
 ]

 [tool.hatch.build.targets.wheel]
@@ -149,6 +149,7 @@ dev = [
     "langchain-experimental",
     "pandas-stubs>=2.2.3.241126",
     "httpx>=0.28.1",
+    "opentelemetry-proto>=1.28.0"
 ]

 [tool.ruff]
@@ -27,7 +27,7 @@ class McpToolAdapter(BaseTool[BaseModel, Any], ABC, Generic[TServerParams]):

     def __init__(self, actor: McpSessionActor, tool: Tool) -> None:
         self._tool = tool
-        self.actor = actor
+        self._actor = actor
         # self.actor = self._actor.actor

         # Extract name and description
@@ -65,7 +65,7 @@ class McpToolAdapter(BaseTool[BaseModel, Any], ABC, Generic[TServerParams]):
         try:
             if cancellation_token.is_cancelled():
                 raise Exception("Operation cancelled")
-            result_future = await self.actor.call(name=self._tool.name, kwargs=kwargs)
+            result_future = await self._actor.call(name=self._tool.name, kwargs=kwargs)
             cancellation_token.link_future(result_future)
             result = await result_future

@@ -121,4 +121,4 @@ class McpToolAdapter(BaseTool[BaseModel, Any], ABC, Generic[TServerParams]):

     async def close(self) -> None:
         """Close the adapter and release resources."""
-        await self.actor.close()
+        await self._actor.close()
|
@ -97,7 +97,7 @@ class SseMcpToolAdapter(
|
||||||
Returns:
|
Returns:
|
||||||
SseMcpToolAdapterConfig: The configuration of the adapter.
|
SseMcpToolAdapterConfig: The configuration of the adapter.
|
||||||
"""
|
"""
|
||||||
return SseMcpToolAdapterConfig(actor=self.actor.dump_component(), tool=self._tool)
|
return SseMcpToolAdapterConfig(actor=self._actor.dump_component(), tool=self._tool)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _from_config(cls, config: SseMcpToolAdapterConfig) -> Self:
|
def _from_config(cls, config: SseMcpToolAdapterConfig) -> Self:
|
||||||
|
|
|
@@ -55,7 +55,7 @@ class StdioMcpToolAdapter(
         Returns:
             StdioMcpToolAdapterConfig: The configuration of the adapter.
         """
-        return StdioMcpToolAdapterConfig(actor=self.actor.dump_component(), tool=self._tool)
+        return StdioMcpToolAdapterConfig(actor=self._actor.dump_component(), tool=self._tool)

     @classmethod
     def _from_config(cls, config: StdioMcpToolAdapterConfig) -> Self:
@@ -1,7 +1,7 @@
-from contextlib import asynccontextmanager
-import logging
 import asyncio
+import logging
 import os
+from contextlib import asynccontextmanager
 from unittest.mock import AsyncMock, MagicMock

 import pytest

@@ -120,6 +120,7 @@ async def test_mcp_tool_execution(
     caplog: pytest.LogCaptureFixture,
 ) -> None:
     """Test that adapter properly executes tools through ClientSession."""
+
     @asynccontextmanager
     async def fake_create_session(*args, **kwargs):  # type: ignore
         yield mock_session

@@ -162,6 +163,7 @@ async def test_adapter_from_server_params(
         "autogen_ext.tools.mcp._base.create_mcp_server_session",
         lambda *args, **kwargs: mock_context,  # type: ignore
     )
+
     @asynccontextmanager
     async def fake_create_session(*args, **kwargs):  # type: ignore
         try:

@@ -269,7 +271,7 @@ async def test_sse_tool_execution(

     # Check log.
     assert "test_output" in caplog.text

     await actor.close()
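The tests above swap the real server session for an `@asynccontextmanager` fake that yields a mock. A minimal standalone version of that trick, runnable outside pytest (the `call_tool` name is illustrative; `AsyncMock` accepts any awaited call):

import asyncio
from contextlib import asynccontextmanager
from unittest.mock import AsyncMock

mock_session = AsyncMock()


@asynccontextmanager
async def fake_create_session(*args, **kwargs):
    # Stand-in for the real session factory: yields the mock instead of connecting.
    yield mock_session


async def main() -> None:
    async with fake_create_session() as session:
        await session.call_tool("echo", {"x": 1})  # AsyncMock records the await
    mock_session.call_tool.assert_awaited_once()


asyncio.run(main())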
@@ -32,9 +32,9 @@ dependencies = [
     "loguru",
     "pyyaml",
     "html2text",
-    "autogen-core>=0.4.9.2,<0.5",
-    "autogen-agentchat>=0.4.9.2,<0.5",
-    "autogen-ext[magentic-one, openai, azure]>=0.4.2,<0.5",
+    "autogen-core>=0.4.9.2,<0.6",
+    "autogen-agentchat>=0.4.9.2,<0.6",
+    "autogen-ext[magentic-one, openai, azure]>=0.4.2,<0.6",
     "anthropic",
 ]
 optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]}
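All three edits above widen the upper bound from `<0.5` to `<0.6` so the 0.5.x releases satisfy the requirement. A quick way to sanity-check a specifier range like this, using the `packaging` library (the versions tested are illustrative):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=0.4.9.2,<0.6")
print(Version("0.5.2") in spec)  # True: the widened cap now admits 0.5.x
print(Version("0.6.0") in spec)  # False: still excluded by the new upper bound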
@@ -452,7 +452,7 @@ wheels = [

 [[package]]
 name = "autogen-agentchat"
-version = "0.5.1"
+version = "0.5.2"
 source = { editable = "packages/autogen-agentchat" }
 dependencies = [
     { name = "autogen-core" },

@@ -463,7 +463,7 @@ requires-dist = [{ name = "autogen-core", editable = "packages/autogen-core" }]

 [[package]]
 name = "autogen-core"
-version = "0.5.1"
+version = "0.5.2"
 source = { editable = "packages/autogen-core" }
 dependencies = [
     { name = "jsonref" },

@@ -582,7 +582,7 @@ dev = [

 [[package]]
 name = "autogen-ext"
-version = "0.5.1"
+version = "0.5.2"
 source = { editable = "packages/autogen-ext" }
 dependencies = [
     { name = "autogen-core" },

@@ -717,6 +717,7 @@ dev = [
     { name = "autogen-test-utils" },
     { name = "httpx" },
     { name = "langchain-experimental" },
+    { name = "opentelemetry-proto" },
     { name = "pandas-stubs" },
 ]

@@ -745,7 +746,7 @@ requires-dist = [
     { name = "httpx", marker = "extra == 'http-tool'", specifier = ">=0.27.0" },
     { name = "ipykernel", marker = "extra == 'jupyter-executor'", specifier = ">=6.29.5" },
     { name = "json-schema-to-pydantic", marker = "extra == 'http-tool'", specifier = ">=0.2.0" },
-    { name = "json-schema-to-pydantic", marker = "extra == 'mcp'", specifier = ">=0.2.3" },
+    { name = "json-schema-to-pydantic", marker = "extra == 'mcp'", specifier = ">=0.2.4" },
     { name = "langchain-core", marker = "extra == 'langchain'", specifier = "~=0.3.3" },
     { name = "llama-cpp-python", marker = "extra == 'llama-cpp'", specifier = ">=0.3.8" },
     { name = "magika", marker = "extra == 'file-surfer'", specifier = ">=0.6.1rc2" },

@@ -786,6 +787,7 @@ dev = [
     { name = "autogen-test-utils", editable = "packages/autogen-test-utils" },
     { name = "httpx", specifier = ">=0.28.1" },
     { name = "langchain-experimental" },
+    { name = "opentelemetry-proto", specifier = ">=1.28.0" },
     { name = "pandas-stubs", specifier = ">=2.2.3.241126" },
 ]

@@ -3044,14 +3046,14 @@ wheels = [

 [[package]]
 name = "json-schema-to-pydantic"
-version = "0.2.3"
+version = "0.2.4"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "pydantic" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/f2/8d/da0e791baf63a957ff67e0706d59386b72ab87858e616b6fcfc9b58cd910/json_schema_to_pydantic-0.2.3.tar.gz", hash = "sha256:c76db1f6001996895328e7aa174aae201d85d1f5e79d592c272ea03c8586e453", size = 35305 }
+sdist = { url = "https://files.pythonhosted.org/packages/0e/5a/82ce52917b4b021e739dc02384bb3257b5ddd04e40211eacdc32c88bdda5/json_schema_to_pydantic-0.2.4.tar.gz", hash = "sha256:c24060aa7694ae7be0465ce11339a6d1cc8a72cd8f4378c889d19722fa7da1ee", size = 37816 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/4a/55/81bbfbc806aab8dc4a21ad1c9c7fd61f94f2b4076ea64f1730a0368831a2/json_schema_to_pydantic-0.2.3-py3-none-any.whl", hash = "sha256:fe0c04357aa8d27ad5a46e54c2d6a8f35ca6c10b36e76a95c39827e38397f427", size = 11699 },
+    { url = "https://files.pythonhosted.org/packages/2e/86/35135e8e4b1da50e6e8ed2afcacce589e576f3460c892d5e616390a4eb71/json_schema_to_pydantic-0.2.4-py3-none-any.whl", hash = "sha256:5c46675df0ab2685d92ed805da38348a34488654cb95ceb1a564dda23dcc3a89", size = 11940 },
 ]

 [[package]]
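Lockfile entries like the ones above pin exact sdist and wheel URLs together with sha256 hashes; the installer recomputes the digest of what it downloaded and refuses a mismatch. A minimal sketch of that check, using the wheel hash from the entry above (the local file path is illustrative):

import hashlib


def sha256_of(path: str) -> str:
    # Stream the file so large wheels do not have to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 16), b""):
            digest.update(chunk)
    return digest.hexdigest()


expected = "5c46675df0ab2685d92ed805da38348a34488654cb95ceb1a564dda23dcc3a89"
# assert sha256_of("json_schema_to_pydantic-0.2.4-py3-none-any.whl") == expected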