Properly close model clients in documentation and samples (#5898)

Closes #5873
---------

Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
This commit is contained in:
Federico Villa 2025-03-20 08:50:14 +01:00 committed by GitHub
parent 3498c3ccda
commit 262c74fd41
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
35 changed files with 259 additions and 159 deletions

View File

@ -492,10 +492,12 @@
"from autogen_agentchat.teams import RoundRobinGroupChat\n",
"from autogen_agentchat.ui import Console\n",
"\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"\n",
"# Create the primary agent.\n",
"primary_agent = AssistantAgent(\n",
" \"primary\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" system_message=\"You are a helpful AI assistant.\",\n",
")\n",
"\n",
@ -512,7 +514,8 @@
"# Create a team with the primary and critic agents.\n",
"team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)\n",
"\n",
"await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))"
"await Console(team.run_stream(task=\"Write a Haiku poem with 4 lines about the fall season.\"))\n",
"await model_client.close()"
]
},
{

View File

@ -234,13 +234,15 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
"\n",
"search_agent = AssistantAgent(\n",
" name=\"Google_Search_Agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" tools=[google_search_tool],\n",
" description=\"Search Google for information, returns top 2 results with a snippet and body content\",\n",
" system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n",
@ -248,7 +250,7 @@
"\n",
"stock_analysis_agent = AssistantAgent(\n",
" name=\"Stock_Analysis_Agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" tools=[stock_analysis_tool],\n",
" description=\"Analyze stock data and generate a plot\",\n",
" system_message=\"Perform data analysis.\",\n",
@ -256,7 +258,7 @@
"\n",
"report_agent = AssistantAgent(\n",
" name=\"Report_Agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" description=\"Generate a report based the search and results of stock analysis\",\n",
" system_message=\"You are a helpful assistant that can generate a comprehensive report on a given topic based on the search and results of stock analysis. When you are done generating the report, reply with TERMINATE.\",\n",
")"
@ -289,7 +291,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -388,7 +390,9 @@
],
"source": [
"stream = team.run_stream(task=\"Write a financial report on American airlines\")\n",
"await Console(stream)"
"await Console(stream)\n",
"\n",
"await model_client.close()"
]
}
],

View File

@ -162,14 +162,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"\n",
"google_search_agent = AssistantAgent(\n",
" name=\"Google_Search_Agent\",\n",
" tools=[google_search_tool],\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" description=\"An agent that can search Google for information, returns results with a snippet and body content\",\n",
" system_message=\"You are a helpful AI assistant. Solve tasks using your tools.\",\n",
")\n",
@ -177,7 +179,7 @@
"arxiv_search_agent = AssistantAgent(\n",
" name=\"Arxiv_Search_Agent\",\n",
" tools=[arxiv_search_tool],\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" description=\"An agent that can search Arxiv for papers related to a given topic, including abstracts\",\n",
" system_message=\"You are a helpful AI assistant. Solve tasks using your tools. Specifically, you can take into consideration the user's request and craft a search query that is most likely to return relevant academic papers.\",\n",
")\n",
@ -185,7 +187,7 @@
"\n",
"report_agent = AssistantAgent(\n",
" name=\"Report_Agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" description=\"Generate a report based on a given topic\",\n",
" system_message=\"You are a helpful assistant. Your task is to synthesize data extracted into a high quality literature review including CORRECT references. You MUST write a final report that is formatted as a literature review with CORRECT references. Your response should end with the word 'TERMINATE'\",\n",
")"
@ -214,7 +216,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -306,7 +308,9 @@
" team.run_stream(\n",
" task=\"Write a literature review on no code tools for building multi agent ai systems\",\n",
" )\n",
")"
")\n",
"\n",
"await model_client.close()"
]
}
],

View File

@ -35,34 +35,36 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
"\n",
"planner_agent = AssistantAgent(\n",
" \"planner_agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" description=\"A helpful assistant that can plan trips.\",\n",
" system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request.\",\n",
")\n",
"\n",
"local_agent = AssistantAgent(\n",
" \"local_agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" description=\"A local assistant that can suggest local activities or places to visit.\",\n",
" system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided.\",\n",
")\n",
"\n",
"language_agent = AssistantAgent(\n",
" \"language_agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" description=\"A helpful assistant that can provide language tips for a given destination.\",\n",
" system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.\",\n",
")\n",
"\n",
"travel_summary_agent = AssistantAgent(\n",
" \"travel_summary_agent\",\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"),\n",
" model_client=model_client,\n",
" description=\"A helpful assistant that can summarize the travel plan.\",\n",
" system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed final travel plan. You must ensure that the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.\",\n",
")"
@ -70,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -267,7 +269,9 @@
"group_chat = RoundRobinGroupChat(\n",
" [planner_agent, local_agent, language_agent, travel_summary_agent], termination_condition=termination\n",
")\n",
"await Console(group_chat.run_stream(task=\"Plan a 3 day trip to Nepal.\"))"
"await Console(group_chat.run_stream(task=\"Plan a 3 day trip to Nepal.\"))\n",
"\n",
"await model_client.close()"
]
}
],

View File

@ -67,6 +67,7 @@ async def main() -> None:
)
team = MagenticOneGroupChat([assistant], model_client=model_client)
await Console(team.run_stream(task="Provide a different proof for Fermat's Last Theorem"))
await model_client.close()
asyncio.run(main())

View File

@ -175,13 +175,14 @@
" )\n",
")\n",
"\n",
"model_client = OpenAIChatCompletionClient(\n",
" model=\"gpt-4o\",\n",
")\n",
"\n",
"# Create assistant agent with ChromaDB memory\n",
"assistant_agent = AssistantAgent(\n",
" name=\"assistant_agent\",\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o\",\n",
" ),\n",
" model_client=model_client,\n",
" tools=[get_weather],\n",
" memory=[chroma_user_memory],\n",
")\n",
@ -189,6 +190,7 @@
"stream = assistant_agent.run_stream(task=\"What is the weather in New York?\")\n",
"await Console(stream)\n",
"\n",
"await model_client.close()\n",
"await chroma_user_memory.close()"
]
},

View File

@ -222,6 +222,7 @@ async def main():
print(response) # Should print response from OpenAI
response = await cache_client.create([UserMessage(content="Hello, how are you?", source="user")])
print(response) # Should print cached response
await openai_model_client.close()
asyncio.run(main())
@ -289,6 +290,8 @@ async def main() -> None:
response = await assistant.on_messages([TextMessage(content="Hello!", source="user")], cancellation_token)
print(response)
await model_client.close()
asyncio.run(main())
```
@ -329,6 +332,8 @@ async def main() -> None:
response = await assistant.on_messages([message], cancellation_token)
print(response)
await model_client.close()
asyncio.run(main())
```
@ -526,6 +531,8 @@ async def main() -> None:
# Carry on the same chat again.
response = await assistant.on_messages([TextMessage(content="Tell me a joke.", source="user")], cancellation_token)
# Close the connection to the model client.
await model_client.close()
asyncio.run(main())
```
@ -604,6 +611,9 @@ async def main() -> None:
stream = group_chat.run_stream(task="Write a python script to print 'Hello, world!'")
# `Console` is a simple UI to display the stream.
await Console(stream)
# Close the connection to the model client.
await model_client.close()
asyncio.run(main())
```
@ -682,6 +692,7 @@ async def main() -> None:
break
response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken())
print("Assistant:", response.chat_message.content)
await model_client.close()
asyncio.run(main())
```
@ -919,6 +930,8 @@ async def main() -> None:
stream = group_chat.run_stream(task="Write a short story about a robot that discovers it has feelings.")
# `Console` is a simple UI to display the stream.
await Console(stream)
# Close the connection to the model client.
await model_client.close()
asyncio.run(main())
```
@ -950,9 +963,7 @@ from autogen_agentchat.conditions import TextMentionTermination
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
def create_team() -> RoundRobinGroupChat:
model_client = OpenAIChatCompletionClient(model="gpt-4o", seed=42, temperature=0)
def create_team(model_client : OpenAIChatCompletionClient) -> RoundRobinGroupChat:
writer = AssistantAgent(
name="writer",
description="A writer.",
@ -977,8 +988,9 @@ def create_team() -> RoundRobinGroupChat:
async def main() -> None:
model_client = OpenAIChatCompletionClient(model="gpt-4o", seed=42, temperature=0)
# Create team.
group_chat = create_team()
group_chat = create_team(model_client)
# `run_stream` returns an async generator to stream the intermediate messages.
stream = group_chat.run_stream(task="Write a short story about a robot that discovers it has feelings.")
@ -991,7 +1003,7 @@ async def main() -> None:
json.dump(state, f)
# Create a new team with the same participants configuration.
group_chat = create_team()
group_chat = create_team(model_client)
# Load the state of the group chat and all participants.
with open("group_chat_state.json", "r") as f:
@ -1002,6 +1014,9 @@ async def main() -> None:
stream = group_chat.run_stream(task="Translate the story into Chinese.")
await Console(stream)
# Close the connection to the model client.
await model_client.close()
asyncio.run(main())
```
@ -1074,9 +1089,7 @@ def search_web_tool(query: str) -> str:
def percentage_change_tool(start: float, end: float) -> float:
return ((end - start) / start) * 100
def create_team() -> SelectorGroupChat:
model_client = OpenAIChatCompletionClient(model="gpt-4o")
def create_team(model_client : OpenAIChatCompletionClient) -> SelectorGroupChat:
planning_agent = AssistantAgent(
"PlanningAgent",
description="An agent for planning tasks, this agent should be the first to engage when given a new task.",
@ -1142,7 +1155,8 @@ def create_team() -> SelectorGroupChat:
return team
async def main() -> None:
team = create_team()
model_client = OpenAIChatCompletionClient(model="gpt-4o")
team = create_team(model_client)
task = "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?"
await Console(team.run_stream(task=task))
@ -1318,6 +1332,8 @@ async def main() -> None:
break
response = await assistant.on_messages([TextMessage(content=user_input, source="user")], CancellationToken())
print("Assistant:", response.chat_message.content)
await model_client.close()
asyncio.run(main())
```

View File

@ -98,6 +98,8 @@
"# Run the agent and stream the messages to the console.\n",
"async def main() -> None:\n",
" await Console(agent.run_stream(task=\"What is the weather in New York?\"))\n",
" # Close the connection to the model client.\n",
" await model_client.close()\n",
"\n",
"\n",
"# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).\n",

View File

@ -189,7 +189,9 @@
"team = RoundRobinGroupChat(participants=[agent], termination_condition=MaxMessageTermination(2))\n",
"\n",
"team_config = team.dump_component() # dump component\n",
"print(team_config.model_dump_json())"
"print(team_config.model_dump_json())\n",
"\n",
"await model_client.close()"
]
},
{

View File

@ -185,7 +185,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -264,7 +264,8 @@
"\n",
"\n",
"# Use asyncio.run(...) if you are running this in a script.\n",
"await run_team_stream()"
"await run_team_stream()\n",
"await model_client.close()"
]
},
{
@ -397,7 +398,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -509,7 +510,8 @@
")\n",
"\n",
"task = \"Conduct market research for TSLA stock\"\n",
"await Console(research_team.run_stream(task=task))"
"await Console(research_team.run_stream(task=task))\n",
"await model_client.close()"
]
},
{

View File

@ -380,7 +380,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -418,7 +418,10 @@
"\n",
"# Let the agent fetch the content of a URL and summarize it.\n",
"result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n",
"print(result.messages[-1].content)"
"print(result.messages[-1].content)\n",
"\n",
"# Close the connection to the model client.\n",
"await model_client.close()"
]
},
{
@ -481,7 +484,9 @@
" [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n",
" ),\n",
" output_stats=True,\n",
")"
")\n",
"\n",
"await model_client.close()"
]
},
{
@ -607,7 +612,8 @@
" system_message=\"Categorize the input as happy, sad, or neutral following the JSON format.\",\n",
")\n",
"\n",
"await Console(agent.run_stream(task=\"I am happy.\"))"
"await Console(agent.run_stream(task=\"I am happy.\"))\n",
"await model_client.close()"
]
},
{

View File

@ -124,7 +124,8 @@
"# Run the conversation and stream to the console.\n",
"stream = team.run_stream(task=\"Write a 4-line poem about the ocean.\")\n",
"# Use asyncio.run(...) when running in a script.\n",
"await Console(stream)"
"await Console(stream)\n",
"await model_client.close()"
]
},
{
@ -225,7 +226,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -287,7 +288,8 @@
" # Get the user response.\n",
" task = input(\"Enter your feedback (type 'exit' to leave): \")\n",
" if task.lower().strip() == \"exit\":\n",
" break"
" break\n",
"await model_client.close()"
]
},
{
@ -319,7 +321,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": null,
"metadata": {},
"outputs": [
{

View File

@ -109,7 +109,8 @@
"from autogen_core.models import UserMessage\n",
"\n",
"result = await openai_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(result)"
"print(result)\n",
"await openai_model_client.close()"
]
},
{
@ -178,7 +179,11 @@
" azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n",
" azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n",
" # api_key=\"sk-...\", # For key-based authentication.\n",
")"
")\n",
"\n",
"result = await az_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(result)\n",
"await az_model_client.close()"
]
},
{
@ -256,7 +261,8 @@
")\n",
"\n",
"result = await client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(result)"
"print(result)\n",
"await client.close()"
]
},
{
@ -297,7 +303,8 @@
"\n",
"anthropic_client = AnthropicChatCompletionClient(model=\"claude-3-7-sonnet-20250219\")\n",
"result = await anthropic_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(result)"
"print(result)\n",
"await anthropic_client.close()"
]
},
{
@ -350,7 +357,8 @@
"ollama_model_client = OllamaChatCompletionClient(model=\"llama3.2\")\n",
"\n",
"response = await ollama_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(response)"
"print(response)\n",
"await ollama_model_client.close()"
]
},
{
@ -391,7 +399,8 @@
")\n",
"\n",
"response = await model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
"print(response)"
"print(response)\n",
"await model_client.close()"
]
},
{
@ -479,7 +488,8 @@
"model_result = await anthropic_model_client.create(\n",
" messages=[UserMessage(content=\"What is the capital of France?\", source=\"User\")]\n",
")\n",
"print(model_result)"
"print(model_result)\n",
"await anthropic_model_client.close()"
]
},
{

View File

@ -41,20 +41,20 @@
"from autogen_core import CancellationToken\n",
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
"\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
"\n",
"assistant_agent = AssistantAgent(\n",
" name=\"assistant_agent\",\n",
" system_message=\"You are a helpful assistant\",\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
" model_client=model_client,\n",
")\n",
"\n",
"# Use asyncio.run(...) when running in a script.\n",
"response = await assistant_agent.on_messages(\n",
" [TextMessage(content=\"Write a 3 line poem on Lake Tanganyika\", source=\"user\")], CancellationToken()\n",
")\n",
"print(response.chat_message.content)"
"print(response.chat_message.content)\n",
"await model_client.close()"
]
},
{
@ -89,12 +89,12 @@
}
],
"source": [
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
"\n",
"new_assistant_agent = AssistantAgent(\n",
" name=\"assistant_agent\",\n",
" system_message=\"You are a helpful assistant\",\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" ),\n",
" model_client=model_client,\n",
")\n",
"await new_assistant_agent.load_state(agent_state)\n",
"\n",
@ -102,7 +102,8 @@
"response = await new_assistant_agent.on_messages(\n",
" [TextMessage(content=\"What was the last line of the previous poem you wrote\", source=\"user\")], CancellationToken()\n",
")\n",
"print(response.chat_message.content)"
"print(response.chat_message.content)\n",
"await model_client.close()"
]
},
{
@ -154,13 +155,13 @@
}
],
"source": [
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
"\n",
"# Define a team.\n",
"assistant_agent = AssistantAgent(\n",
" name=\"assistant_agent\",\n",
" system_message=\"You are a helpful assistant\",\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" ),\n",
" model_client=model_client,\n",
")\n",
"agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n",
"\n",
@ -329,7 +330,8 @@
"new_agent_team = RoundRobinGroupChat([assistant_agent], termination_condition=MaxMessageTermination(max_messages=2))\n",
"await new_agent_team.load_state(team_state)\n",
"stream = new_agent_team.run_stream(task=\"What was the last line of the poem you wrote?\")\n",
"await Console(stream)"
"await Console(stream)\n",
"await model_client.close()"
]
}
],

View File

@ -676,7 +676,9 @@
"\n",
"# Run the team with a task and print the messages to the console.\n",
"async for message in team.run_stream(task=\"Increment the number 5 to 10.\"): # type: ignore\n",
" print(type(message).__name__, message)"
" print(type(message).__name__, message)\n",
"\n",
"await model_client.close()"
]
},
{

View File

@ -481,7 +481,8 @@
"round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=function_call_termination)\n",
"\n",
"# Use asyncio.run(...) if you are running this script as a standalone script.\n",
"await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))"
"await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))\n",
"await model_client.close()"
]
},
{

View File

@ -232,7 +232,10 @@
"assert isinstance(response.content, str)\n",
"parsed_response = AgentResponse.model_validate_json(response.content)\n",
"print(parsed_response.thoughts)\n",
"print(parsed_response.response)"
"print(parsed_response.response)\n",
"\n",
"# Close the connection to the model client.\n",
"await model_client.close()"
]
},
{
@ -312,6 +315,9 @@
" response = await cache_client.create([UserMessage(content=\"Hello, how are you?\", source=\"user\")])\n",
" print(response) # Should print cached response\n",
"\n",
" await openai_model_client.close()\n",
" await cache_client.close()\n",
"\n",
"\n",
"asyncio.run(main())"
]
@ -435,16 +441,16 @@
"# Create the runtime and register the agent.\n",
"from autogen_core import AgentId\n",
"\n",
"model_client = OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-mini\",\n",
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n",
")\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
"await SimpleAgent.register(\n",
" runtime,\n",
" \"simple_agent\",\n",
" lambda: SimpleAgent(\n",
" OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-mini\",\n",
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n",
" )\n",
" ),\n",
" lambda: SimpleAgent(model_client=model_client),\n",
")\n",
"# Start the runtime processing messages.\n",
"runtime.start()\n",
@ -453,7 +459,8 @@
"response = await runtime.send_message(message, AgentId(\"simple_agent\", \"default\"))\n",
"print(response.content)\n",
"# Stop the runtime processing messages.\n",
"await runtime.stop()"
"await runtime.stop()\n",
"await model_client.close()"
]
},
{

View File

@ -126,16 +126,16 @@
}
],
"source": [
"model_client = OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-mini\",\n",
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n",
")\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
"await SimpleAgentWithContext.register(\n",
" runtime,\n",
" \"simple_agent_context\",\n",
" lambda: SimpleAgentWithContext(\n",
" OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-mini\",\n",
" # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY set in the environment.\n",
" )\n",
" ),\n",
" lambda: SimpleAgentWithContext(model_client=model_client),\n",
")\n",
"# Start the runtime processing messages.\n",
"runtime.start()\n",
@ -155,7 +155,8 @@
"print(f\"Response: {response.content}\")\n",
"\n",
"# Stop the runtime processing messages.\n",
"await runtime.stop()"
"await runtime.stop()\n",
"await model_client.close()"
]
},
{

View File

@ -168,14 +168,14 @@
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
"\n",
"# Create the OpenAI chat completion client. Using OPENAI_API_KEY from environment variable.\n",
"client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"\n",
"# Create a user message.\n",
"user_message = UserMessage(content=\"What is the stock price of AAPL on 2021/01/01?\", source=\"user\")\n",
"\n",
"# Run the chat completion with the stock_price_tool defined above.\n",
"cancellation_token = CancellationToken()\n",
"create_result = await client.create(\n",
"create_result = await model_client.create(\n",
" messages=[user_message], tools=[stock_price_tool], cancellation_token=cancellation_token\n",
")\n",
"create_result.content"
@ -252,8 +252,9 @@
" AssistantMessage(content=create_result.content, source=\"assistant\"), # assistant message with tool call\n",
" FunctionExecutionResultMessage(content=[exec_result]), # function execution result message\n",
"]\n",
"create_result = await client.create(messages=messages, cancellation_token=cancellation_token) # type: ignore\n",
"print(create_result.content)"
"create_result = await model_client.create(messages=messages, cancellation_token=cancellation_token) # type: ignore\n",
"print(create_result.content)\n",
"await model_client.close()"
]
},
{
@ -394,6 +395,8 @@
}
],
"source": [
"# Create the model client.\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"# Create a runtime.\n",
"runtime = SingleThreadedAgentRuntime()\n",
"# Create the tools.\n",
@ -403,8 +406,8 @@
" runtime,\n",
" \"tool_use_agent\",\n",
" lambda: ToolUseAgent(\n",
" OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" tools,\n",
" model_client=model_client,\n",
" tool_schema=tools,\n",
" ),\n",
")"
]
@ -439,7 +442,8 @@
"response = await runtime.send_message(Message(\"What is the stock price of NVDA on 2024/06/01?\"), tool_use_agent)\n",
"print(response.content)\n",
"# Stop processing messages.\n",
"await runtime.stop()"
"await runtime.stop()\n",
"await model_client.close()"
]
}
],

View File

@ -157,22 +157,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"runtime = SingleThreadedAgentRuntime()\n",
"\n",
"model_client = get_model_client()\n",
"\n",
"cathy = await Assistant.register(\n",
" runtime,\n",
" \"cathy\",\n",
" lambda: Assistant(name=\"Cathy\", model_client=get_model_client()),\n",
" lambda: Assistant(name=\"Cathy\", model_client=model_client),\n",
")\n",
"\n",
"joe = await Assistant.register(\n",
" runtime,\n",
" \"joe\",\n",
" lambda: Assistant(name=\"Joe\", model_client=get_model_client()),\n",
" lambda: Assistant(name=\"Joe\", model_client=model_client),\n",
")"
]
},
@ -185,7 +187,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -224,7 +226,10 @@
" recipient=AgentId(joe, \"default\"),\n",
" sender=AgentId(cathy, \"default\"),\n",
")\n",
"await runtime.stop_when_idle()"
"await runtime.stop_when_idle()\n",
"\n",
"# Close the connections to the model clients.\n",
"await model_client.close()"
]
}
],

View File

@ -219,13 +219,14 @@
" tools=[python_tool],\n",
" ),\n",
")\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"await ToolUseAgent.register(\n",
" runtime,\n",
" \"tool_enabled_agent\",\n",
" lambda: ToolUseAgent(\n",
" description=\"Tool Use Agent\",\n",
" system_messages=[SystemMessage(content=\"You are a helpful AI Assistant. Use your tools to solve problems.\")],\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" tool_schema=[python_tool.schema],\n",
" tool_agent_type=tool_agent_type,\n",
" ),\n",
@ -266,7 +267,10 @@
"\n",
"# Stop the runtime and the docker executor.\n",
"await runtime.stop()\n",
"await docker_executor.stop()"
"await docker_executor.stop()\n",
"\n",
"# Close the connection to the model client.\n",
"await model_client.close()"
]
}
],

View File

@ -246,15 +246,14 @@
"async with DockerCommandLineCodeExecutor(work_dir=work_dir) as executor: # type: ignore[syntax]\n",
" # Register the assistant and executor agents by providing\n",
" # their agent types, the factory functions for creating instance and subscriptions.\n",
" model_client = OpenAIChatCompletionClient(\n",
" model=\"gpt-4o\",\n",
" # api_key=\"YOUR_API_KEY\"\n",
" )\n",
" await Assistant.register(\n",
" runtime,\n",
" \"assistant\",\n",
" lambda: Assistant(\n",
" OpenAIChatCompletionClient(\n",
" model=\"gpt-4o\",\n",
" # api_key=\"YOUR_API_KEY\"\n",
" )\n",
" ),\n",
" lambda: Assistant(model_client=model_client),\n",
" )\n",
" await Executor.register(runtime, \"executor\", lambda: Executor(executor))\n",
"\n",
@ -263,7 +262,11 @@
" await runtime.publish_message(\n",
" Message(\"Create a plot of NVIDA vs TSLA stock returns YTD from 2024-01-01.\"), DefaultTopicId()\n",
" )\n",
" await runtime.stop_when_idle()"
"\n",
" # Wait for the runtime to stop when idle.\n",
" await runtime.stop_when_idle()\n",
" # Close the connection to the model client.\n",
" await model_client.close()"
]
},
{

View File

@ -467,7 +467,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@ -484,16 +484,18 @@
"user_description = \"User for providing final approval.\"\n",
"illustrator_description = \"An illustrator for creating images.\"\n",
"\n",
"model_client = OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
")\n",
"\n",
"editor_agent_type = await EditorAgent.register(\n",
" runtime,\n",
" editor_topic_type, # Using topic type as the agent type.\n",
" lambda: EditorAgent(\n",
" description=editor_description,\n",
" group_chat_topic_type=group_chat_topic_type,\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
" model_client=model_client,\n",
" ),\n",
")\n",
"await runtime.add_subscription(TypeSubscription(topic_type=editor_topic_type, agent_type=editor_agent_type.type))\n",
@ -505,10 +507,7 @@
" lambda: WriterAgent(\n",
" description=writer_description,\n",
" group_chat_topic_type=group_chat_topic_type,\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
" model_client=model_client,\n",
" ),\n",
")\n",
"await runtime.add_subscription(TypeSubscription(topic_type=writer_topic_type, agent_type=writer_agent_type.type))\n",
@ -520,10 +519,7 @@
" lambda: IllustratorAgent(\n",
" description=illustrator_description,\n",
" group_chat_topic_type=group_chat_topic_type,\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
" model_client=model_client,\n",
" image_client=openai.AsyncClient(\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
@ -549,10 +545,7 @@
" \"group_chat_manager\",\n",
" lambda: GroupChatManager(\n",
" participant_topic_types=[writer_topic_type, illustrator_topic_type, editor_topic_type, user_topic_type],\n",
" model_client=OpenAIChatCompletionClient(\n",
" model=\"gpt-4o-2024-08-06\",\n",
" # api_key=\"YOUR_API_KEY\",\n",
" ),\n",
" model_client=model_client,\n",
" participant_descriptions=[writer_description, illustrator_description, editor_description, user_description],\n",
" ),\n",
")\n",
@ -572,7 +565,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -1171,7 +1164,8 @@
" ),\n",
" TopicId(type=group_chat_topic_type, source=session_id),\n",
")\n",
"await runtime.stop_when_idle()"
"await runtime.stop_when_idle()\n",
"await model_client.close()"
]
},
{

View File

@ -727,7 +727,8 @@
"await runtime.publish_message(UserLogin(), topic_id=TopicId(user_topic_type, source=session_id))\n",
"\n",
"# Run until completion.\n",
"await runtime.stop_when_idle()"
"await runtime.stop_when_idle()\n",
"await model_client.close()"
]
},
{

View File

@ -218,7 +218,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -477,20 +477,20 @@
],
"source": [
"runtime = SingleThreadedAgentRuntime()\n",
"await WorkerAgent.register(\n",
" runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n",
")\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"await WorkerAgent.register(runtime, \"worker\", lambda: WorkerAgent(model_client=model_client))\n",
"await OrchestratorAgent.register(\n",
" runtime,\n",
" \"orchestrator\",\n",
" lambda: OrchestratorAgent(\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n",
" ),\n",
" lambda: OrchestratorAgent(model_client=model_client, worker_agent_types=[\"worker\"] * 3, num_layers=3),\n",
")\n",
"\n",
"runtime.start()\n",
"result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n",
"\n",
"await runtime.stop_when_idle()\n",
"await model_client.close()\n",
"\n",
"print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")"
]
}

View File

@ -302,7 +302,7 @@
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -318,11 +318,14 @@
],
"source": [
"runtime = SingleThreadedAgentRuntime()\n",
"\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"\n",
"await MathSolver.register(\n",
" runtime,\n",
" \"MathSolverA\",\n",
" lambda: MathSolver(\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" topic_type=\"MathSolverA\",\n",
" num_neighbors=2,\n",
" max_round=3,\n",
@ -332,7 +335,7 @@
" runtime,\n",
" \"MathSolverB\",\n",
" lambda: MathSolver(\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" topic_type=\"MathSolverB\",\n",
" num_neighbors=2,\n",
" max_round=3,\n",
@ -342,7 +345,7 @@
" runtime,\n",
" \"MathSolverC\",\n",
" lambda: MathSolver(\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" topic_type=\"MathSolverC\",\n",
" num_neighbors=2,\n",
" max_round=3,\n",
@ -352,7 +355,7 @@
" runtime,\n",
" \"MathSolverD\",\n",
" lambda: MathSolver(\n",
" model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n",
" model_client=model_client,\n",
" topic_type=\"MathSolverD\",\n",
" num_neighbors=2,\n",
" max_round=3,\n",
@ -407,7 +410,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -543,7 +546,10 @@
"question = \"Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?\"\n",
"runtime.start()\n",
"await runtime.publish_message(Question(content=question), DefaultTopicId())\n",
"await runtime.stop_when_idle()"
"# Wait for the runtime to stop when idle.\n",
"await runtime.stop_when_idle()\n",
"# Close the connection to the model client.\n",
"await model_client.close()"
]
}
],

View File

@ -385,7 +385,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -445,12 +445,9 @@
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
"\n",
"runtime = SingleThreadedAgentRuntime()\n",
"await ReviewerAgent.register(\n",
" runtime, \"ReviewerAgent\", lambda: ReviewerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n",
")\n",
"await CoderAgent.register(\n",
" runtime, \"CoderAgent\", lambda: CoderAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n",
")\n",
"model_client = OpenAIChatCompletionClient(model=\"gpt-4o-mini\")\n",
"await ReviewerAgent.register(runtime, \"ReviewerAgent\", lambda: ReviewerAgent(model_client=model_client))\n",
"await CoderAgent.register(runtime, \"CoderAgent\", lambda: CoderAgent(model_client=model_client))\n",
"runtime.start()\n",
"await runtime.publish_message(\n",
" message=CodeWritingTask(task=\"Write a function to find the sum of all even numbers in a list.\"),\n",
@ -458,7 +455,9 @@
")\n",
"\n",
"# Keep processing messages until idle.\n",
"await runtime.stop_when_idle()"
"await runtime.stop_when_idle()\n",
"# Close the model client.\n",
"await model_client.close()"
]
},
{

View File

@ -363,7 +363,8 @@
" topic_id=TopicId(concept_extractor_topic_type, source=\"default\"),\n",
")\n",
"\n",
"await runtime.stop_when_idle()"
"await runtime.stop_when_idle()\n",
"await model_client.close()"
]
}
],

View File

@ -10,11 +10,7 @@ from autogen_core.model_context import BufferedChatCompletionContext
from autogen_core.models import ChatCompletionClient
def create_ai_player() -> AssistantAgent:
# Load the model client from config.
with open("model_config.yaml", "r") as f:
model_config = yaml.safe_load(f)
model_client = ChatCompletionClient.load_component(model_config)
def create_ai_player(model_client: ChatCompletionClient) -> AssistantAgent:
# Create an agent that can use the model client.
player = AssistantAgent(
name="ai_player",
@ -101,7 +97,11 @@ async def get_ai_move(board: chess.Board, player: AssistantAgent, max_tries: int
async def main(human_player: bool, max_tries: int) -> None:
board = chess.Board()
player = create_ai_player()
# Load the model client from config.
with open("model_config.yaml", "r") as f:
model_config = yaml.safe_load(f)
model_client = ChatCompletionClient.load_component(model_config)
player = create_ai_player(model_client)
while not board.is_game_over():
# Get the AI's move.
ai_move = await get_ai_move(board, player, max_tries)
@ -126,6 +126,8 @@ async def main(human_player: bool, max_tries: int) -> None:
print("----------------")
print(f"Game over! Result: {result}")
await model_client.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()

View File

@ -41,6 +41,7 @@ async def main(model_config: Dict[str, Any]) -> None:
print(f"\nQuery: {query}")
await Console(assistant_agent.run_stream(task=query))
await model_client.close()
if __name__ == "__main__":

View File

@ -306,6 +306,7 @@ async def main(model_config: Dict[str, Any], latest_user_input: Optional[str] =
runtime.start()
await runtime.stop_when(lambda: termination_handler.is_terminated or needs_user_input_handler.needs_user_input)
await model_client.close()
user_input_needed = None
if needs_user_input_handler.user_input_content is not None:

View File

@ -141,7 +141,7 @@ def make_move(
return f"Moved {piece_name} ({piece_symbol}) from {SQUARE_NAMES[new_move.from_square]} to {SQUARE_NAMES[new_move.to_square]}."
async def chess_game(runtime: AgentRuntime, model_config: Dict[str, Any]) -> None: # type: ignore
async def chess_game(runtime: AgentRuntime, model_client: ChatCompletionClient) -> None:  # type: ignore
"""Create agents for a chess game and return the group chat."""
# Create the board.
@ -205,8 +205,6 @@ async def chess_game(runtime: AgentRuntime, model_config: Dict[str, Any]) -> Non
),
]
model_client = ChatCompletionClient.load_component(model_config)
# Register the agents.
await ToolAgent.register(
runtime,
@ -250,7 +248,8 @@ async def chess_game(runtime: AgentRuntime, model_config: Dict[str, Any]) -> Non
async def main(model_config: Dict[str, Any]) -> None:
"""Main Entrypoint."""
runtime = SingleThreadedAgentRuntime()
await chess_game(runtime, model_config)
model_client = ChatCompletionClient.load_component(model_config)
await chess_game(runtime, model_client)
runtime.start()
# Publish an initial message to trigger the group chat manager to start
# orchestration.
@ -260,6 +259,7 @@ async def main(model_config: Dict[str, Any]) -> None:
AgentId("PlayerWhite", "default"),
)
await runtime.stop_when_idle()
await model_client.close()
if __name__ == "__main__":

View File

@ -21,6 +21,7 @@ async def main(config: AppConfig):
await asyncio.sleep(4)
Console().print(Markdown("Starting **`Editor Agent`**"))
await editor_agent_runtime.start()
model_client = AzureOpenAIChatCompletionClient(**config.client_config)
editor_agent_type = await BaseGroupChatAgent.register(
editor_agent_runtime,
config.editor_agent.topic_type,
@ -28,7 +29,7 @@ async def main(config: AppConfig):
description=config.editor_agent.description,
group_chat_topic_type=config.group_chat_manager.topic_type,
system_message=config.editor_agent.system_message,
model_client=AzureOpenAIChatCompletionClient(**config.client_config),
model_client=model_client,
ui_config=config.ui_agent,
),
)
@ -40,6 +41,7 @@ async def main(config: AppConfig):
)
await editor_agent_runtime.stop_when_signal()
await model_client.close()
if __name__ == "__main__":

View File

@ -26,11 +26,13 @@ async def main(config: AppConfig):
await group_chat_manager_runtime.start()
set_all_log_levels(logging.ERROR)
model_client = AzureOpenAIChatCompletionClient(**config.client_config)
group_chat_manager_type = await GroupChatManager.register(
group_chat_manager_runtime,
"group_chat_manager",
lambda: GroupChatManager(
model_client=AzureOpenAIChatCompletionClient(**config.client_config),
model_client=model_client,
participant_topic_types=[config.writer_agent.topic_type, config.editor_agent.topic_type],
participant_descriptions=[config.writer_agent.description, config.editor_agent.description],
max_rounds=config.group_chat_manager.max_rounds,
@ -64,6 +66,7 @@ async def main(config: AppConfig):
)
await group_chat_manager_runtime.stop_when_signal()
await model_client.close()
Console().print("Manager left the chat!")

View File

@ -31,6 +31,9 @@ async def main():
break
await Console(assistant_agent.run_stream(task=user_input))
# Close the connection to the client
await client.close()
if __name__ == "__main__":
import asyncio
asyncio.run(main())