mirror of https://github.com/microsoft/autogen.git
parent afaf2c1288
commit 8707729da9
@@ -41,6 +41,7 @@ jobs:
           { ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6" },
           { ref: "v0.4.0.dev7", dest-dir: "0.4.0.dev7" },
           { ref: "v0.4.0.dev8", dest-dir: "0.4.0.dev8" },
+          { ref: "v0.4.0.dev9", dest-dir: "0.4.0.dev9" },
         ]
   steps:
     - name: Checkout
@@ -49,8 +49,8 @@ We will update version numbers according to the following rules:

1. Create a PR that updates the version numbers across the codebase ([example](https://github.com/microsoft/autogen/pull/4359))
-2. After merging the PR, create and push a tag that corresponds to the new version. For example, for `0.4.0.dev8`:
-   - `git tag 0.4.0.dev8 && git push origin 0.4.0.dev8`
+2. The docs CI will fail for the PR, but this is expected and will be resolved in the next step
+2. After merging the PR, create and push a tag that corresponds to the new version. For example, for `0.4.0.dev9`:
+   - `git tag 0.4.0.dev9 && git push origin 0.4.0.dev9`
3. Restart the docs CI by finding the failed [job corresponding to the `push` event](https://github.com/microsoft/autogen/actions/workflows/docs.yml) and restarting all jobs
4. Run [this](https://github.com/microsoft/autogen/actions/workflows/single-python-package.yml) workflow for each of the packages that need to be released and get an approval for the release for it to run
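For reference, steps 2–4 above can also be driven from the command line. This is a minimal sketch assuming an authenticated GitHub CLI (`gh`); the `package` input name for `single-python-package.yml` is an illustrative assumption, not taken from this diff:

```bash
# Tag the release and push the tag (step 2 above).
git tag 0.4.0.dev9 && git push origin 0.4.0.dev9

# Find and restart the failed docs CI run triggered by the push (step 3).
gh run list --workflow docs.yml --limit 1   # note the run id
gh run rerun <run-id>                       # restart all jobs

# Trigger the per-package release workflow (step 4); the input name is hypothetical.
gh workflow run single-python-package.yml -f package=autogen-core
```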
@@ -5,7 +5,7 @@

[](https://twitter.com/pyautogen) [](https://www.linkedin.com/company/105812540)
[](https://github.com/microsoft/autogen/discussions) [](https://microsoft.github.io/autogen/0.2/) [](https://microsoft.github.io/autogen/dev/)
-[](https://pypi.org/project/autogen-core/0.4.0.dev8/) [](https://pypi.org/project/autogen-agentchat/0.4.0.dev8/) [](https://pypi.org/project/autogen-ext/0.4.0.dev8/)
+[](https://pypi.org/project/autogen-core/0.4.0.dev9/) [](https://pypi.org/project/autogen-agentchat/0.4.0.dev9/) [](https://pypi.org/project/autogen-ext/0.4.0.dev9/)

</div>
@@ -105,7 +105,7 @@ We look forward to your contributions!

First install the packages:

```bash
-pip install 'autogen-agentchat==0.4.0.dev8' 'autogen-ext[openai]==0.4.0.dev8'
+pip install 'autogen-agentchat==0.4.0.dev9' 'autogen-ext[openai]==0.4.0.dev9'
```

The following code uses OpenAI's GPT-4o model and you need to provide your
@@ -51,7 +51,12 @@
    {
      "name": "0.4.0.dev8",
      "version": "0.4.0.dev8",
-     "url": "/autogen/0.4.0.dev8/",
+     "url": "/autogen/0.4.0.dev8/"
+   },
+   {
+     "name": "0.4.0.dev9",
+     "version": "0.4.0.dev9",
+     "url": "/autogen/0.4.0.dev9/",
      "preferred": true
    }
  ]
@@ -1,7 +1,7 @@
# AutoGen Python packages

[](https://microsoft.github.io/autogen/dev/)
-[](https://pypi.org/project/autogen-core/0.4.0.dev8/) [](https://pypi.org/project/autogen-agentchat/0.4.0.dev8/) [](https://pypi.org/project/autogen-ext/0.4.0.dev8/)
+[](https://pypi.org/project/autogen-core/0.4.0.dev9/) [](https://pypi.org/project/autogen-agentchat/0.4.0.dev9/) [](https://pypi.org/project/autogen-ext/0.4.0.dev9/)

This directory works as a single `uv` workspace containing all project packages. See [`packages`](./packages/) to discover all project packages.
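As a minimal sketch of what working in that workspace looks like (assuming a recent `uv` is installed; the commands are illustrative, not part of this commit):

```bash
cd python
uv sync --all-extras        # resolve and install all workspace packages into .venv
source .venv/bin/activate   # then run tools, tests, or the examples above
```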
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "autogen-agentchat"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
license = {file = "LICENSE-CODE"}
description = "AutoGen agents and teams library"
readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
    "Operating System :: OS Independent",
]
dependencies = [
-    "autogen-core==0.4.0.dev8",
+    "autogen-core==0.4.0.dev9",
]

[tool.uv]
@@ -56,7 +56,7 @@ AgentChat </div>
High-level API that includes preset agents and teams for building multi-agent systems.

```sh
-pip install 'autogen-agentchat==0.4.0.dev8'
+pip install 'autogen-agentchat==0.4.0.dev9'
```

💡 *Start here if you are looking for an API similar to AutoGen 0.2.*
@@ -77,7 +77,7 @@ Get Started
Provides building blocks for creating asynchronous, event driven multi-agent systems.

```sh
-pip install 'autogen-core==0.4.0.dev8'
+pip install 'autogen-core==0.4.0.dev9'
```

+++
@@ -31,10 +31,10 @@ myst:
Library that is at a similar level of abstraction as AutoGen 0.2, including default agents and group chat.

```sh
-pip install 'autogen-agentchat==0.4.0.dev8'
+pip install 'autogen-agentchat==0.4.0.dev9'
```

-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev8/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev9/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat)
:::

(pkg-info-autogen-core)=
@@ -46,10 +46,10 @@ pip install 'autogen-agentchat==0.4.0.dev8'
Implements the core functionality of the AutoGen framework, providing basic building blocks for creating multi-agent systems.

```sh
-pip install 'autogen-core==0.4.0.dev8'
+pip install 'autogen-core==0.4.0.dev9'
```

-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev8/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev9/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core)
:::

(pkg-info-autogen-ext)=
@@ -61,7 +61,7 @@ pip install 'autogen-core==0.4.0.dev8'
Implementations of core components that interface with external services, or use extra dependencies. For example, Docker based code execution.

```sh
-pip install 'autogen-ext==0.4.0.dev8'
+pip install 'autogen-ext==0.4.0.dev9'
```

Extras:
@@ -71,7 +71,7 @@ Extras:
- `docker` needed for {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`
- `openai` needed for {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`

-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext.agents.web_surfer.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev8/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext.agents.web_surfer.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev9/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext)
:::

(pkg-info-autogen-magentic-one)=
@@ -61,7 +61,7 @@ Install the `autogen-agentchat` package using pip:

```bash
-pip install 'autogen-agentchat==0.4.0.dev8'
+pip install 'autogen-agentchat==0.4.0.dev9'
```

```{note}
@@ -74,7 +74,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following
extensions:

```bash
-pip install 'autogen-ext[openai]==0.4.0.dev8'
+pip install 'autogen-ext[openai]==0.4.0.dev9'
```

## Install Docker for Code Execution
@@ -1,153 +1,153 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Quickstart"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Via AgentChat, you can build applications quickly using preset agents.\n",
    "To illustrate this, we will begin with creating a team of a single agent\n",
    "that can use tools and respond to messages.\n",
    "\n",
    "The following code uses the OpenAI model. If you haven't already, you need to\n",
    "install the following package and extension:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
-    "pip install 'autogen-agentchat==0.4.0.dev8' 'autogen-ext[openai]==0.4.0.dev8'"
+    "pip install 'autogen-agentchat==0.4.0.dev9' 'autogen-ext[openai]==0.4.0.dev9'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To use Azure OpenAI models and AAD authentication,\n",
    "you can follow the instructions [here](./tutorial/models.ipynb#azure-openai)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------- user ----------\n",
      "What is the weather in New York?\n",
      "---------- weather_agent ----------\n",
      "[FunctionCall(id='call_AhTZ2q3TNL8x0qs00e3wIZ7y', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
      "[Prompt tokens: 79, Completion tokens: 15]\n",
      "---------- weather_agent ----------\n",
      "[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_AhTZ2q3TNL8x0qs00e3wIZ7y')]\n",
      "---------- weather_agent ----------\n",
      "The weather in New York is currently 73 degrees and sunny.\n",
      "[Prompt tokens: 90, Completion tokens: 14]\n",
      "---------- weather_agent ----------\n",
      "TERMINATE\n",
      "[Prompt tokens: 137, Completion tokens: 4]\n",
      "---------- Summary ----------\n",
      "Number of messages: 5\n",
      "Finish reason: Text 'TERMINATE' mentioned\n",
      "Total prompt tokens: 306\n",
      "Total completion tokens: 33\n",
      "Duration: 1.43 seconds\n"
     ]
    }
   ],
   "source": [
    "from autogen_agentchat.agents import AssistantAgent\n",
    "from autogen_agentchat.conditions import TextMentionTermination\n",
    "from autogen_agentchat.teams import RoundRobinGroupChat\n",
    "from autogen_agentchat.ui import Console\n",
    "from autogen_ext.models import OpenAIChatCompletionClient\n",
    "\n",
    "\n",
    "# Define a tool\n",
    "async def get_weather(city: str) -> str:\n",
    "    return f\"The weather in {city} is 73 degrees and Sunny.\"\n",
    "\n",
    "\n",
    "async def main() -> None:\n",
    "    # Define an agent\n",
    "    weather_agent = AssistantAgent(\n",
    "        name=\"weather_agent\",\n",
    "        model_client=OpenAIChatCompletionClient(\n",
    "            model=\"gpt-4o-2024-08-06\",\n",
    "            # api_key=\"YOUR_API_KEY\",\n",
    "        ),\n",
    "        tools=[get_weather],\n",
    "    )\n",
    "\n",
    "    # Define termination condition\n",
    "    termination = TextMentionTermination(\"TERMINATE\")\n",
    "\n",
    "    # Define a team\n",
    "    agent_team = RoundRobinGroupChat([weather_agent], termination_condition=termination)\n",
    "\n",
    "    # Run the team and stream messages to the console\n",
    "    stream = agent_team.run_stream(task=\"What is the weather in New York?\")\n",
    "    await Console(stream)\n",
    "\n",
    "\n",
    "# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).\n",
    "await main()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The code snippet above introduces two high level concepts in AgentChat: *Agent* and *Team*. An Agent helps us define what actions are taken when a message is received. Specifically, we use the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset - an agent that can be given access to a model (e.g., LLM) and tools (functions) that it can then use to address tasks. A Team helps us define the rules for how agents interact with each other. In the {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` team, agents respond in a sequential round-robin fashion.\n",
    "In this case, we have a single agent, so the same agent is used for each round."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## What's Next?\n",
    "\n",
    "Now that you have a basic understanding of how to define an agent and a team, consider following the [tutorial](./tutorial/index) for a walkthrough on other features of AgentChat.\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -1,187 +1,187 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Models\n",
    "\n",
    "In many cases, agents need access to LLM model services such as OpenAI, Azure OpenAI, or local models. Since there are many different providers with different APIs, `autogen-core` implements a protocol for [model clients](../../core-user-guide/framework/model-clients.ipynb) and `autogen-ext` implements a set of model clients for popular model services. AgentChat can use these model clients to interact with model services. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## OpenAI\n",
    "\n",
    "To access OpenAI models, install the `openai` extension, which allows you to use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
-    "pip install 'autogen-ext[openai]==0.4.0.dev8'"
+    "pip install 'autogen-ext[openai]==0.4.0.dev9'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You will also need to obtain an [API key](https://platform.openai.com/account/api-keys) from OpenAI."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen_ext.models import OpenAIChatCompletionClient\n",
    "\n",
    "opneai_model_client = OpenAIChatCompletionClient(\n",
    "    model=\"gpt-4o-2024-08-06\",\n",
    "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY environment variable set.\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To test the model client, you can use the following code:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CreateResult(finish_reason='stop', content='The capital of France is Paris.', usage=RequestUsage(prompt_tokens=15, completion_tokens=7), cached=False, logprobs=None)\n"
     ]
    }
   ],
   "source": [
    "from autogen_core.components.models import UserMessage\n",
    "\n",
    "result = await opneai_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```{note}\n",
    "You can use this client with models hosted on OpenAI-compatible endpoints, however, we have not tested this functionality.\n",
    "See {py:class}`~autogen_ext.models.OpenAIChatCompletionClient` for more information.\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Azure OpenAI\n",
    "\n",
    "Similarly, install the `azure` and `openai` extensions to use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
-    "pip install 'autogen-ext[openai,azure]==0.4.0.dev8'"
+    "pip install 'autogen-ext[openai,azure]==0.4.0.dev9'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n",
    "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n",
    "\n",
    "The following code snippet shows how to use AAD authentication.\n",
    "The identity used must be assigned the [Cognitive Services OpenAI User](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen_ext.models import AzureOpenAIChatCompletionClient\n",
    "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
    "\n",
    "# Create the token provider\n",
    "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n",
    "\n",
    "az_model_client = AzureOpenAIChatCompletionClient(\n",
    "    azure_deployment=\"{your-azure-deployment}\",\n",
    "    model=\"{model-name, such as gpt-4o}\",\n",
    "    api_version=\"2024-06-01\",\n",
    "    azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n",
    "    azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n",
    "    # api_key=\"sk-...\", # For key-based authentication.\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more information."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Local Models\n",
    "\n",
    "See [this guide](../../core-user-guide/faqs.md#what-are-model-capabilities-and-how-do-i-specify-them) for how to override a model's default capabilities definitions in autogen.\n",
    "\n",
    "More to come. Stay tuned!"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -1,222 +1,222 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Distributed Agent Runtime\n",
    "\n",
    "```{attention}\n",
    "The distributed agent runtime is an experimental feature. Expect breaking changes\n",
    "to the API.\n",
    "```\n",
    "\n",
    "A distributed agent runtime facilitates communication and agent lifecycle management\n",
    "across process boundaries.\n",
    "It consists of a host service and at least one worker runtime.\n",
    "\n",
    "The host service maintains connections to all active worker runtimes,\n",
    "facilitates message delivery, and keeps sessions for all direct messages (i.e., RPCs).\n",
    "A worker runtime processes application code (agents) and connects to the host service.\n",
    "It also advertises the agents it supports to the host service,\n",
    "so the host service can deliver messages to the correct worker.\n",
    "\n",
    "````{note}\n",
    "The distributed agent runtime requires extra dependencies, install them using:\n",
    "```bash\n",
-    "pip install autogen-core[grpc]==0.4.0.dev8\n",
+    "pip install autogen-core[grpc]==0.4.0.dev9\n",
    "```\n",
    "````\n",
    "\n",
    "We can start a host service using {py:class}`~autogen_core.application.GrpcWorkerAgentRuntimeHost`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n",
    "\n",
    "host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n",
    "host.start() # Start a host service in the background."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The above code starts the host service in the background and accepts\n",
    "worker connections on port 50051.\n",
    "\n",
    "Before running worker runtimes, let's define our agent.\n",
    "The agent will publish a new message on every message it receives.\n",
    "It also keeps track of how many messages it has published, and \n",
    "stops publishing new messages once it has published 5 messages."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from dataclasses import dataclass\n",
    "\n",
    "from autogen_core import DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class MyMessage:\n",
    "    content: str\n",
    "\n",
    "\n",
    "@default_subscription\n",
    "class MyAgent(RoutedAgent):\n",
    "    def __init__(self, name: str) -> None:\n",
    "        super().__init__(\"My agent\")\n",
    "        self._name = name\n",
    "        self._counter = 0\n",
    "\n",
    "    @message_handler\n",
    "    async def my_message_handler(self, message: MyMessage, ctx: MessageContext) -> None:\n",
    "        self._counter += 1\n",
    "        if self._counter > 5:\n",
    "            return\n",
    "        content = f\"{self._name}: Hello x {self._counter}\"\n",
    "        print(content)\n",
    "        await self.publish_message(MyMessage(content=content), DefaultTopicId())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we can set up the worker agent runtimes.\n",
    "We use {py:class}`~autogen_core.application.GrpcWorkerAgentRuntime`.\n",
    "We set up two worker runtimes. Each runtime hosts one agent.\n",
    "All agents publish and subscribe to the default topic, so they can see all\n",
    "messages being published.\n",
    "\n",
    "To run the agents, we publish a message from a worker."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "worker1: Hello x 1\n",
      "worker2: Hello x 1\n",
      "worker2: Hello x 2\n",
      "worker1: Hello x 2\n",
      "worker1: Hello x 3\n",
      "worker2: Hello x 3\n",
      "worker2: Hello x 4\n",
      "worker1: Hello x 4\n",
      "worker1: Hello x 5\n",
      "worker2: Hello x 5\n"
     ]
    }
   ],
   "source": [
    "import asyncio\n",
    "\n",
    "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime\n",
    "\n",
    "worker1 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n",
    "worker1.start()\n",
    "await MyAgent.register(worker1, \"worker1\", lambda: MyAgent(\"worker1\"))\n",
    "\n",
    "worker2 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n",
    "worker2.start()\n",
    "await MyAgent.register(worker2, \"worker2\", lambda: MyAgent(\"worker2\"))\n",
    "\n",
    "await worker2.publish_message(MyMessage(content=\"Hello!\"), DefaultTopicId())\n",
    "\n",
    "# Let the agents run for a while.\n",
    "await asyncio.sleep(5)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can see each agent published exactly 5 messages.\n",
    "\n",
    "To stop the worker runtimes, we can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntime.stop`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "await worker1.stop()\n",
    "await worker2.stop()\n",
    "\n",
    "# To keep the worker running until a termination signal is received (e.g., SIGTERM).\n",
    "# await worker1.stop_when_signal()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntimeHost.stop`\n",
    "to stop the host service."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "await host.stop()\n",
    "\n",
    "# To keep the host service running until a termination signal (e.g., SIGTERM)\n",
    "# await host.stop_when_signal()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Next Steps\n",
    "To see complete examples of using distributed runtime, please take a look at the following samples:\n",
    "\n",
    "- [Distributed Workers](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/worker) \n",
    "- [Distributed Semantic Router](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/semantic_router) \n",
    "- [Distributed Group Chat](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/distributed-group-chat) \n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "agnext",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "autogen-core"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
license = {file = "LICENSE-CODE"}
description = "Foundational interfaces and agent runtime implementation for AutoGen"
readme = "README.md"
@@ -75,7 +75,7 @@ dev-dependencies = [
    "autodoc_pydantic~=2.2",
    "pygments",

-    "autogen_ext==0.4.0.dev8",
+    "autogen_ext==0.4.0.dev9",

    # Documentation tooling
    "sphinx-autobuild",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "autogen-ext"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
license = {file = "LICENSE-CODE"}
description = "AutoGen extensions library"
readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
    "Operating System :: OS Independent",
]
dependencies = [
-    "autogen-core==0.4.0.dev8",
+    "autogen-core==0.4.0.dev9",
]
@@ -35,7 +35,7 @@ magentic-one = [
    "pillow>=11.0.0",
]
video-surfer = [
-    "autogen-agentchat==0.4.0.dev8",
+    "autogen-agentchat==0.4.0.dev9",
    "opencv-python>=4.5",
    "ffmpeg-python",
    "openai-whisper",
@@ -51,7 +51,7 @@ class ACADynamicSessionsCodeExecutor(CodeExecutor):

    .. code-block:: bash

-       pip install 'autogen-ext[azure]==0.4.0.dev8'
+       pip install 'autogen-ext[azure]==0.4.0.dev9'

    .. caution::
@@ -59,7 +59,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor):

    .. code-block:: bash

-       pip install 'autogen-ext[docker]==0.4.0.dev8'
+       pip install 'autogen-ext[docker]==0.4.0.dev9'

    The executor first saves each code block in a file in the working
@@ -909,7 +909,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):

    .. code-block:: bash

-       pip install 'autogen-ext[openai]==0.4.0.dev8'
+       pip install 'autogen-ext[openai]==0.4.0.dev9'

    The following code snippet shows how to use the client with an OpenAI model:
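(The snippet itself falls outside the diff context above; for orientation, here is a minimal usage sketch consistent with the models notebook in this same commit, assuming `autogen-ext[openai]==0.4.0.dev9` is installed and `OPENAI_API_KEY` is set.)

```python
import asyncio

from autogen_core.components.models import UserMessage
from autogen_ext.models import OpenAIChatCompletionClient


async def main() -> None:
    # The client reads OPENAI_API_KEY from the environment when api_key is omitted.
    client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
    result = await client.create([UserMessage(content="What is the capital of France?", source="user")])
    print(result.content)


asyncio.run(main())
```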
@@ -989,7 +989,7 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):

    .. code-block:: bash

-       pip install 'autogen-ext[openai,azure]==0.4.0.dev8'
+       pip install 'autogen-ext[openai,azure]==0.4.0.dev9'

    To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint,
    api version, and model capabilities.
@@ -33,9 +33,9 @@ dependencies = [
    "alembic",
    "loguru",
    "pyyaml",
-    "autogen-core==0.4.0.dev8",
-    "autogen-agentchat==0.4.0.dev8",
-    "autogen-ext==0.4.0.dev8"
+    "autogen-core==0.4.0.dev9",
+    "autogen-agentchat==0.4.0.dev9",
+    "autogen-ext==0.4.0.dev9"
]
optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]}
@@ -339,7 +339,7 @@ wheels = [

[[package]]
name = "autogen-agentchat"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
source = { editable = "packages/autogen-agentchat" }
dependencies = [
    { name = "autogen-core" },
@@ -353,7 +353,7 @@ dev = []

[[package]]
name = "autogen-core"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
source = { editable = "packages/autogen-core" }
dependencies = [
    { name = "aiohttp" },
@@ -478,7 +478,7 @@ dev = [

[[package]]
name = "autogen-ext"
-version = "0.4.0.dev8"
+version = "0.4.0.dev9"
source = { editable = "packages/autogen-ext" }
dependencies = [
    { name = "autogen-core" },