test_children_to_parent #13
@@ -241,7 +241,7 @@ class AgentOutputBlock(Block):
             advanced=True,
         )
         format: str = SchemaField(
-            description="The format string to be used to format the recorded_value.",
+            description="The format string to be used to format the recorded_value. Use Jinja2 syntax.",
             default="",
             advanced=True,
         )

@@ -26,8 +26,10 @@ from backend.data.model import (
 )
 from backend.util import json
 from backend.util.settings import BehaveAs, Settings
+from backend.util.text import TextFormatter

 logger = logging.getLogger(__name__)
+fmt = TextFormatter()

 LLMProviderName = Literal[
     ProviderName.ANTHROPIC,

@@ -109,6 +111,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     LLAMA3_1_70B = "llama-3.1-70b-versatile"
     LLAMA3_1_8B = "llama-3.1-8b-instant"
     # Ollama models
+    OLLAMA_LLAMA3_2 = "llama3.2"
     OLLAMA_LLAMA3_8B = "llama3"
     OLLAMA_LLAMA3_405B = "llama3.1:405b"
     OLLAMA_DOLPHIN = "dolphin-mistral:latest"

@@ -163,6 +166,7 @@ MODEL_METADATA = {
     # Limited to 16k during preview
     LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072),
     LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072),
+    LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192),
     LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
     LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192),
     LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768),

@@ -234,7 +238,9 @@ class AIStructuredResponseGeneratorBlock(Block):
             description="Number of times to retry the LLM call if the response does not match the expected format.",
         )
         prompt_values: dict[str, str] = SchemaField(
-            advanced=False, default={}, description="Values used to fill in the prompt."
+            advanced=False,
+            default={},
+            description="Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}.",
         )
         max_tokens: int | None = SchemaField(
             advanced=True,

@@ -448,8 +454,8 @@ class AIStructuredResponseGeneratorBlock(Block):

         values = input_data.prompt_values
         if values:
-            input_data.prompt = input_data.prompt.format(**values)
-            input_data.sys_prompt = input_data.sys_prompt.format(**values)
+            input_data.prompt = fmt.format_string(input_data.prompt, values)
+            input_data.sys_prompt = fmt.format_string(input_data.sys_prompt, values)

         if input_data.sys_prompt:
             prompt.append({"role": "system", "content": input_data.sys_prompt})

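For readers skimming the diff: the hunks above switch prompt formatting from Python's `str.format` to the sandboxed Jinja2 `TextFormatter` (imported from `backend.util.text`, see the TextFormatter hunk further down). A minimal sketch of the intended behaviour, using plain Jinja2 rather than the block machinery and with illustrative placeholder names, looks like this:

```python
# Illustrative sketch only (not the block's actual code): how {{variable_name}}
# placeholders in prompt/sys_prompt are filled from prompt_values after this change.
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment(loader=BaseLoader())

prompt = "Summarize {{topic}} in a {{tone}} tone."  # hypothetical prompt
prompt_values = {"topic": "the release notes", "tone": "friendly"}

print(env.from_string(prompt).render(prompt_values))
# -> "Summarize the release notes in a friendly tone."
```
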
@@ -576,7 +582,9 @@ class AITextGeneratorBlock(Block):
             description="Number of times to retry the LLM call if the response does not match the expected format.",
         )
         prompt_values: dict[str, str] = SchemaField(
-            advanced=False, default={}, description="Values used to fill in the prompt."
+            advanced=False,
+            default={},
+            description="Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}.",
         )
         ollama_host: str = SchemaField(
             advanced=True,

@@ -141,10 +141,10 @@ class ExtractTextInformationBlock(Block):
 class FillTextTemplateBlock(Block):
     class Input(BlockSchema):
         values: dict[str, Any] = SchemaField(
-            description="Values (dict) to be used in format"
+            description="Values (dict) to be used in format. These values can be used by putting them in double curly braces in the format template. e.g. {{value_name}}.",
         )
         format: str = SchemaField(
-            description="Template to format the text using `values`"
+            description="Template to format the text using `values`. Use Jinja2 syntax."
         )

     class Output(BlockSchema):

@@ -160,7 +160,7 @@ class FillTextTemplateBlock(Block):
             test_input=[
                 {
                     "values": {"name": "Alice", "hello": "Hello", "world": "World!"},
-                    "format": "{hello}, {world} {{name}}",
+                    "format": "{{hello}}, {{ world }} {{name}}",
                 },
                 {
                     "values": {"list": ["Hello", " World!"]},

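As a quick sanity check on the updated fixture, rendering the new template with the same test values (plain Jinja2, outside the block machinery) gives:

```python
from jinja2 import Template

values = {"name": "Alice", "hello": "Hello", "world": "World!"}
print(Template("{{hello}}, {{ world }} {{name}}").render(values))
# -> "Hello, World! Alice"
```
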
@@ -51,6 +51,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.LLAMA3_1_405B: 1,
     LlmModel.LLAMA3_1_70B: 1,
     LlmModel.LLAMA3_1_8B: 1,
+    LlmModel.OLLAMA_LLAMA3_2: 1,
     LlmModel.OLLAMA_LLAMA3_8B: 1,
     LlmModel.OLLAMA_LLAMA3_405B: 1,
     LlmModel.OLLAMA_DOLPHIN: 1,

@@ -38,7 +38,7 @@ def create_test_graph() -> graph.Graph:
         graph.Node(
             block_id=FillTextTemplateBlock().id,
             input_default={
-                "format": "{a}, {b}{c}",
+                "format": "{{a}}, {{b}}{{c}}",
                 "values_#_c": "!!!",
             },
         ),

@@ -1,5 +1,3 @@
-import re
-
 from jinja2 import BaseLoader
 from jinja2.sandbox import SandboxedEnvironment

@@ -15,8 +13,5 @@ class TextFormatter:
         self.env.globals.clear()

     def format_string(self, template_str: str, values=None, **kwargs) -> str:
-        # For python.format compatibility: replace all {...} with {{..}}.
-        # But avoid replacing {{...}} to {{{...}}}.
-        template_str = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", template_str)
         template = self.env.from_string(template_str)
         return template.render(values or {}, **kwargs)

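Taken together, the two hunks above leave TextFormatter as a thin wrapper over a sandboxed Jinja2 environment. A rough standalone approximation (the constructor is not shown in this diff, so the environment setup below is an assumption based on the imports and the `globals.clear()` call) behaves like this:

```python
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment


class TextFormatter:
    """Standalone approximation of the class after this change (illustrative)."""

    def __init__(self):
        # Assumption: the sandboxed environment is built from the imported classes.
        self.env = SandboxedEnvironment(loader=BaseLoader())
        self.env.globals.clear()

    def format_string(self, template_str: str, values=None, **kwargs) -> str:
        template = self.env.from_string(template_str)
        return template.render(values or {}, **kwargs)


fmt = TextFormatter()
# Double-brace placeholders render as expected...
print(fmt.format_string("{{a}}, {{b}}{{c}}", {"a": "Hello", "b": "World", "c": "!!!"}))
# -> "Hello, World!!!"
# ...but with the str.format compatibility shim removed, single braces are no
# longer rewritten: Jinja2 treats "{a}" as literal text and passes it through.
print(fmt.format_string("{a}, {b}{c}", {"a": "Hello"}))
# -> "{a}, {b}{c}"
```

This passthrough of single-brace inputs is what the data migration below compensates for.
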
@@ -0,0 +1,78 @@
+/*
+  Warnings:
+
+  - You are about to replace the single-brace string input format for the following blocks:
+    - AgentOutputBlock
+    - FillTextTemplateBlock
+    - AITextGeneratorBlock
+    - AIStructuredResponseGeneratorBlock
+    with a double-brace format.
+*/
+WITH to_update AS (
+    SELECT
+        "agentBlockId",
+        "constantInput"::jsonb AS j
+    FROM "AgentNode"
+    WHERE
+        "agentBlockId" IN (
+            '363ae599-353e-4804-937e-b2ee3cef3da4', -- AgentOutputBlock
+            'db7d8f02-2f44-4c55-ab7a-eae0941f0c30', -- FillTextTemplateBlock
+            '1f292d4a-41a4-4977-9684-7c8d560b9f91', -- AITextGeneratorBlock
+            'ed55ac19-356e-4243-a6cb-bc599e9b716f'  -- AIStructuredResponseGeneratorBlock
+        )
+        AND (
+            "constantInput"::jsonb->>'format' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+            OR "constantInput"::jsonb->>'prompt' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+            OR "constantInput"::jsonb->>'sys_prompt' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+        )
+),
+updated_rows AS (
+    SELECT
+        "agentBlockId",
+        (
+            j
+            -- Update "format" if it has a single-brace placeholder
+            || CASE WHEN j->>'format' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+               THEN jsonb_build_object(
+                   'format',
+                   regexp_replace(
+                       j->>'format',
+                       '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})',
+                       '{{\1}}',
+                       'g'
+                   )
+               )
+               ELSE '{}'::jsonb
+               END
+            -- Update "prompt" if it has a single-brace placeholder
+            || CASE WHEN j->>'prompt' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+               THEN jsonb_build_object(
+                   'prompt',
+                   regexp_replace(
+                       j->>'prompt',
+                       '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})',
+                       '{{\1}}',
+                       'g'
+                   )
+               )
+               ELSE '{}'::jsonb
+               END
+            -- Update "sys_prompt" if it has a single-brace placeholder
+            || CASE WHEN j->>'sys_prompt' ~ '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})'
+               THEN jsonb_build_object(
+                   'sys_prompt',
+                   regexp_replace(
+                       j->>'sys_prompt',
+                       '(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})',
+                       '{{\1}}',
+                       'g'
+                   )
+               )
+               ELSE '{}'::jsonb
+               END
+        )::text AS "newConstantInput"
+    FROM to_update
+)
+UPDATE "AgentNode" AS an
+SET "constantInput" = ur."newConstantInput"
+FROM updated_rows ur
+WHERE an."agentBlockId" = ur."agentBlockId";

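To see what the migration's `regexp_replace` pattern does to stored inputs, here is a rough Python equivalent of the same regex (for sanity-checking only; the actual rewrite runs in PostgreSQL as shown above):

```python
import re

# Same pattern as the migration: a lone {identifier} that is not already
# part of a double-brace {{identifier}} placeholder.
PATTERN = re.compile(r"(?<!\{)\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}(?!\})")


def to_double_braces(text: str) -> str:
    return PATTERN.sub(r"{{\1}}", text)


print(to_double_braces("{a}, {b}{c}"))          # -> "{{a}}, {{b}}{{c}}"
print(to_double_braces("keep {{name}} as-is"))  # -> unchanged
```
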
@@ -102,7 +102,7 @@ async def assert_sample_graph_executions(
     assert exec.graph_exec_id == graph_exec_id
     assert exec.output_data == {"output": ["Hello, World!!!"]}
     assert exec.input_data == {
-        "format": "{a}, {b}{c}",
+        "format": "{{a}}, {{b}}{{c}}",
         "values": {"a": "Hello", "b": "World", "c": "!!!"},
         "values_#_a": "Hello",
         "values_#_b": "World",

After Width: | Height: | Size: 115 KiB |
After Width: | Height: | Size: 88 KiB |
After Width: | Height: | Size: 105 KiB |
After Width: | Height: | Size: 29 KiB |
After Width: | Height: | Size: 6.0 KiB |
After Width: | Height: | Size: 105 KiB |
After Width: | Height: | Size: 116 KiB |
@@ -1,37 +1,78 @@
 # Running Ollama with AutoGPT

-Follow these steps to set up and run Ollama and your AutoGPT project:
+> **Important**: Ollama integration is only available when self-hosting the AutoGPT platform. It cannot be used with the cloud-hosted version.

-1. **Run Ollama**
-   - Open a terminal
-   - Execute the following command:
-     ```
-     ollama run llama3
-     ```
-   - Leave this terminal running
+Follow these steps to set up and run Ollama with the AutoGPT platform.

-2. **Run the Backend**
-   - Open a new terminal
-   - Navigate to the backend directory in the AutoGPT project:
-     ```
-     cd autogpt_platform/backend/
-     ```
-   - Start the backend using Poetry:
-     ```
-     poetry run app
-     ```
+## Prerequisites

-3. **Run the Frontend**
-   - Open another terminal
-   - Navigate to the frontend directory in the AutoGPT project:
-     ```
-     cd autogpt_platform/frontend/
-     ```
-   - Start the frontend development server:
-     ```
-     npm run dev
-     ```
+1. Make sure you have gone through and completed the [AutoGPT Setup](/platform/getting-started) steps; if not, please do so before continuing with this guide.
+2. Before starting, ensure you have [Ollama installed](https://ollama.com/download) on your machine.

-4. **Choose the Ollama Model**
-   - Add LLMBlock in the UI
-   - Choose the last option in the model selection dropdown
+## Setup Steps
+
+### 1. Launch Ollama
+Open a new terminal and execute:
+```bash
+ollama run llama3.2
+```
+
+> **Note**: This will download the [llama3.2](https://ollama.com/library/llama3.2) model and start the service. Keep this terminal running in the background.
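Optional (not part of the guide itself): if you want to confirm the Ollama service is reachable before wiring it into a graph, its local API answers on port 11434 by default, for example:

```python
import json
import urllib.request

# Quick local check: list the models the Ollama service has pulled.
with urllib.request.urlopen("http://localhost:11434/api/tags") as resp:
    models = json.load(resp)["models"]

print([m["name"] for m in models])  # should include an entry like "llama3.2:latest"
```
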
+### 2. Start the Backend
+Open a new terminal and navigate to the autogpt_platform directory:
+```bash
+cd autogpt_platform
+docker compose up -d --build
+```
+
+### 3. Start the Frontend
+Open a new terminal and navigate to the frontend directory:
+```bash
+cd autogpt_platform/frontend
+npm run dev
+```
+
+Then visit [http://localhost:3000](http://localhost:3000) to see the frontend running. After registering an account and logging in, navigate to the build page at [http://localhost:3000/build](http://localhost:3000/build).
+
+### 4. Using Ollama with AutoGPT
+
+Now that both Ollama and the AutoGPT platform are running, we can move on to using Ollama with AutoGPT:
+
+1. Add an AI Text Generator block to your workspace (any AI/LLM block will work, but for this example we will use the AI Text Generator block):
+
+
+2. In the "LLM Model" dropdown, select "llama3.2" (this is the model we downloaded earlier):
+
+
+3. You will see it ask for "Ollama Credentials"; simply press "Enter API key".
+
+
+You will then see "Add new API key for Ollama". In the API key field you can enter anything you want, as Ollama does not require an API key (a single space works). For the name, call it "Ollama", then press "Save & use this API key".
+
+
+4. After that you will see the block again. Add your prompts, then save and run the graph:
+
+
+That's it! You've successfully set up the AutoGPT platform and made an LLM call to Ollama.
+
+
+
+### Using Ollama on a Remote Server with AutoGPT
+
+To run Ollama on a remote server, make sure the Ollama server is running and is accessible from other devices on your network (or remotely) via port 11434. Then follow the same steps above, but add the Ollama server's IP address to the "Ollama Host" field in the block settings, like so:
+
+
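The same kind of reachability check works for the remote setup; replace the placeholder address below with your Ollama server's IP (the same value you put in the "Ollama Host" field):

```python
import urllib.request

OLLAMA_HOST = "192.168.1.100:11434"  # placeholder: your remote server's IP and port

# If this request succeeds, the AutoGPT block should be able to reach Ollama too.
with urllib.request.urlopen(f"http://{OLLAMA_HOST}/api/tags", timeout=5) as resp:
    print("Ollama reachable:", resp.status == 200)
```
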
+## Troubleshooting
+
+If you encounter any issues, verify that:
+
+- Ollama is properly installed and running
+- All terminals remain open during operation
+- Docker is running before starting the backend
+
+For common errors:
+
+1. **Connection Refused**: Make sure Ollama is running and the host address is correct (also check that the port is correct; the default is 11434).
+2. **Model Not Found**: Try running `ollama pull llama3.2` manually first.
+3. **Docker Issues**: Ensure the Docker daemon is running with `docker ps`.