mirror of https://github.com/langgenius/dify.git

commit 08ac36812b (parent 556de444e8)

feat: support LLM process document file (#10966)

Co-authored-by: -LAN- <laipz8200@outlook.com>
@@ -3,7 +3,6 @@ from typing import Optional

 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.file import file_manager
-from core.file.models import FileType
 from core.model_manager import ModelInstance
 from core.model_runtime.entities import (
     AssistantPromptMessage,
@@ -103,12 +102,11 @@ class TokenBufferMemory:
                 prompt_message_contents: list[PromptMessageContent] = []
                 prompt_message_contents.append(TextPromptMessageContent(data=message.query))
                 for file in file_objs:
-                    if file.type in {FileType.IMAGE, FileType.AUDIO}:
-                        prompt_message = file_manager.to_prompt_message_content(
-                            file,
-                            image_detail_config=detail,
-                        )
-                        prompt_message_contents.append(prompt_message)
+                    prompt_message = file_manager.to_prompt_message_content(
+                        file,
+                        image_detail_config=detail,
+                    )
+                    prompt_message_contents.append(prompt_message)

                 prompt_messages.append(UserPromptMessage(content=prompt_message_contents))

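With the FileType.IMAGE/FileType.AUDIO gate removed, every file attached to a history message is converted to prompt content, which is what lets documents reach the model. A sketch of the message shape this produces, using minimal stand-ins for the repo's entity classes (the stand-in definitions are illustrative; the mime_type/data field names follow the Gemini hunk further below):

from dataclasses import dataclass

# Minimal stand-ins for core.model_runtime.entities; illustrative only.
@dataclass
class TextPromptMessageContent:
    data: str

@dataclass
class DocumentPromptMessageContent:
    mime_type: str  # e.g. "application/pdf"
    data: str       # base64-encoded document bytes

@dataclass
class UserPromptMessage:
    content: list

message = UserPromptMessage(content=[
    TextPromptMessageContent(data="Summarize the attached report."),
    DocumentPromptMessageContent(mime_type="application/pdf", data="<base64>"),
])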
@@ -49,7 +49,7 @@ class PromptMessageFunction(BaseModel):
     function: PromptMessageTool


-class PromptMessageContentType(Enum):
+class PromptMessageContentType(str, Enum):
    """
    Enum class for prompt message content type.
    """
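Mixing `str` into the enum makes each member an actual string, so it compares equal to its raw value and serializes without a custom encoder — useful when content types round-trip through JSON payloads. A quick sketch (the member value shown is an assumption, for illustration):

import json
from enum import Enum

class PromptMessageContentType(str, Enum):
    DOCUMENT = "document"  # assumed member value, for illustration

# A str-mixin member IS a string, so plain comparisons work...
assert PromptMessageContentType.DOCUMENT == "document"
# ...and json.dumps emits the raw value with no custom encoder.
print(json.dumps({"type": PromptMessageContentType.DOCUMENT}))  # {"type": "document"}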
@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 1048576

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 2097152

@@ -7,6 +7,7 @@ features:
   - vision
   - tool-call
   - stream-tool-call
+  - document
 model_properties:
   mode: chat
   context_size: 32767
@@ -16,6 +16,7 @@ from PIL import Image
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
+    DocumentPromptMessageContent,
     ImagePromptMessageContent,
     PromptMessage,
     PromptMessageContentType,
@@ -35,6 +36,21 @@ from core.model_runtime.errors.invoke import (
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

+GOOGLE_AVAILABLE_MIMETYPE = [
+    "application/pdf",
+    "application/x-javascript",
+    "text/javascript",
+    "application/x-python",
+    "text/x-python",
+    "text/plain",
+    "text/html",
+    "text/css",
+    "text/md",
+    "text/csv",
+    "text/xml",
+    "text/rtf",
+]
+
+
 class GoogleLargeLanguageModel(LargeLanguageModel):
     def _invoke(
@@ -370,6 +386,12 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
                         raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}")
                     blob = {"inline_data": {"mime_type": mime_type, "data": base64_data}}
                     glm_content["parts"].append(blob)
+            elif c.type == PromptMessageContentType.DOCUMENT:
+                message_content = cast(DocumentPromptMessageContent, c)
+                if message_content.mime_type not in GOOGLE_AVAILABLE_MIMETYPE:
+                    raise ValueError(f"Unsupported mime type {message_content.mime_type}")
+                blob = {"inline_data": {"mime_type": message_content.mime_type, "data": message_content.data}}
+                glm_content["parts"].append(blob)
         return glm_content
     elif isinstance(message, AssistantPromptMessage):
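The new DOCUMENT branch forwards base64-encoded document bytes to Gemini as an `inline_data` part, mirroring how images are sent, after checking the MIME type against the allow-list above. A self-contained sketch of that conversion (the helper name and the abbreviated MIME list are illustrative):

import base64

# Abbreviated; the full allow-list is GOOGLE_AVAILABLE_MIMETYPE above.
ALLOWED = ["application/pdf", "text/plain", "text/csv"]

def to_document_part(mime_type: str, raw_bytes: bytes) -> dict:
    # Same guard as the hunk: reject MIME types Gemini cannot ingest.
    if mime_type not in ALLOWED:
        raise ValueError(f"Unsupported mime type {mime_type}")
    # Documents travel as base64 inline_data blobs, like images do.
    return {"inline_data": {"mime_type": mime_type,
                            "data": base64.b64encode(raw_bytes).decode()}}

part = to_document_part("application/pdf", b"%PDF-1.4 minimal")
# `part` can then be appended to glm_content["parts"].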
@@ -6,6 +6,7 @@ model_type: llm
 features:
   - vision
   - agent-thought
+  - video
 model_properties:
   mode: chat
   context_size: 32000

@@ -6,6 +6,7 @@ model_type: llm
 features:
   - vision
   - agent-thought
+  - video
 model_properties:
   mode: chat
   context_size: 32000

@@ -6,6 +6,7 @@ model_type: llm
 features:
   - vision
   - agent-thought
+  - video
 model_properties:
   mode: chat
   context_size: 32768

@@ -6,6 +6,7 @@ model_type: llm
 features:
   - vision
   - agent-thought
+  - video
 model_properties:
   mode: chat
   context_size: 8000

@@ -6,6 +6,7 @@ model_properties:
   mode: chat
 features:
   - vision
+  - video
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -26,9 +26,15 @@ class NoPromptFoundError(LLMNodeError):
     """Raised when no prompt is found in the LLM configuration."""


-class NotSupportedPromptTypeError(LLMNodeError):
-    """Raised when the prompt type is not supported."""
+class TemplateTypeNotSupportError(LLMNodeError):
+    def __init__(self, *, type_name: str):
+        super().__init__(f"Prompt type {type_name} is not supported.")


 class MemoryRolePrefixRequiredError(LLMNodeError):
     """Raised when memory role prefix is required for completion model."""
+
+
+class FileTypeNotSupportError(LLMNodeError):
+    def __init__(self, *, type_name: str):
+        super().__init__(f"{type_name} type is not supported by this model")
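Both replacement exceptions move message construction into `__init__` with a keyword-only `type_name`, so call sites cannot pass the argument positionally and the message format stays in one place. A small usage sketch (the `LLMNodeError` base here is a stand-in):

class LLMNodeError(Exception):
    """Stand-in for the real base class, for illustration."""

class FileTypeNotSupportError(LLMNodeError):
    def __init__(self, *, type_name: str):
        super().__init__(f"{type_name} type is not supported by this model")

try:
    raise FileTypeNotSupportError(type_name="video")  # keyword-only
except LLMNodeError as err:
    print(err)  # video type is not supported by this model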
@@ -65,6 +65,7 @@ from .entities import (
     ModelConfig,
 )
 from .exc import (
+    FileTypeNotSupportError,
     InvalidContextStructureError,
     InvalidVariableTypeError,
     LLMModeRequiredError,

@@ -72,7 +73,7 @@ from .exc import (
     MemoryRolePrefixRequiredError,
     ModelNotExistError,
     NoPromptFoundError,
-    NotSupportedPromptTypeError,
+    TemplateTypeNotSupportError,
     VariableNotFoundError,
 )
@@ -621,9 +622,7 @@ class LLMNode(BaseNode[LLMNodeData]):
                 prompt_content = prompt_messages[0].content.replace("#sys.query#", user_query)
                 prompt_messages[0].content = prompt_content
         else:
-            errmsg = f"Prompt type {type(prompt_template)} is not supported"
-            logger.warning(errmsg)
-            raise NotSupportedPromptTypeError(errmsg)
+            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

         if vision_enabled and user_files:
             file_prompts = []
@@ -671,7 +670,7 @@ class LLMNode(BaseNode[LLMNodeData]):
                     and ModelFeature.AUDIO not in model_config.model_schema.features
                 )
             ):
-                continue
+                raise FileTypeNotSupportError(type_name=content_item.type)
             prompt_message_content.append(content_item)
         if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
             prompt_message.content = prompt_message_content[0].data
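Behavior change worth noting: a file whose content type the model's feature list cannot handle used to be dropped silently with `continue`; it now raises `FileTypeNotSupportError`, so the mismatch surfaces at invocation time instead of producing a prompt with missing attachments. A reduced sketch of the new guard (function and feature names are illustrative):

class FileTypeNotSupportError(Exception):  # stand-in; see the exc.py hunk above
    def __init__(self, *, type_name: str):
        super().__init__(f"{type_name} type is not supported by this model")

def ensure_supported(content_type: str, model_features: set[str]) -> None:
    # Previously: `continue` skipped the item; now the node fails loudly.
    if content_type not in model_features:
        raise FileTypeNotSupportError(type_name=content_type)

ensure_supported("document", {"vision", "document"})  # ok
# ensure_supported("video", {"vision"})               # would raise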
@@ -400,59 +400,6 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
                 )
             },
         ),
-        LLMNodeTestScenario(
-            description="Prompt template with variable selector of File without vision feature",
-            user_query=fake_query,
-            user_files=[],
-            vision_enabled=True,
-            vision_detail=fake_vision_detail,
-            features=[],
-            window_size=fake_window_size,
-            prompt_template=[
-                LLMNodeChatModelMessage(
-                    text="{{#input.image#}}",
-                    role=PromptMessageRole.USER,
-                    edition_type="basic",
-                ),
-            ],
-            expected_messages=mock_history[fake_window_size * -2 :] + [UserPromptMessage(content=fake_query)],
-            file_variables={
-                "input.image": File(
-                    tenant_id="test",
-                    type=FileType.IMAGE,
-                    filename="test1.jpg",
-                    transfer_method=FileTransferMethod.REMOTE_URL,
-                    remote_url=fake_remote_url,
-                )
-            },
-        ),
-        LLMNodeTestScenario(
-            description="Prompt template with variable selector of File with video file and vision feature",
-            user_query=fake_query,
-            user_files=[],
-            vision_enabled=True,
-            vision_detail=fake_vision_detail,
-            features=[ModelFeature.VISION],
-            window_size=fake_window_size,
-            prompt_template=[
-                LLMNodeChatModelMessage(
-                    text="{{#input.image#}}",
-                    role=PromptMessageRole.USER,
-                    edition_type="basic",
-                ),
-            ],
-            expected_messages=mock_history[fake_window_size * -2 :] + [UserPromptMessage(content=fake_query)],
-            file_variables={
-                "input.image": File(
-                    tenant_id="test",
-                    type=FileType.VIDEO,
-                    filename="test1.mp4",
-                    transfer_method=FileTransferMethod.REMOTE_URL,
-                    remote_url=fake_remote_url,
-                    extension="mp4",
-                )
-            },
-        ),
     ]

     for scenario in test_scenarios:
@@ -12,34 +12,46 @@ import ConfigContext from '@/context/debug-configuration'
 // import { Resolution } from '@/types/app'
 import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks'
 import Switch from '@/app/components/base/switch'
-import type { FileUpload } from '@/app/components/base/features/types'
+import { SupportUploadFileTypes } from '@/app/components/workflow/types'

 const ConfigVision: FC = () => {
   const { t } = useTranslation()
-  const { isShowVisionConfig } = useContext(ConfigContext)
+  const { isShowVisionConfig, isAllowVideoUpload } = useContext(ConfigContext)
   const file = useFeatures(s => s.features.file)
   const featuresStore = useFeaturesStore()

-  const handleChange = useCallback((data: FileUpload) => {
+  const isImageEnabled = file?.allowed_file_types?.includes(SupportUploadFileTypes.image) ?? false
+
+  const handleChange = useCallback((value: boolean) => {
     const {
       features,
       setFeatures,
     } = featuresStore!.getState()

     const newFeatures = produce(features, (draft) => {
-      draft.file = {
-        ...draft.file,
-        enabled: data.enabled,
-        image: {
-          enabled: data.enabled,
-          detail: data.image?.detail,
-          transfer_methods: data.image?.transfer_methods,
-          number_limits: data.image?.number_limits,
-        },
+      if (value) {
+        draft.file!.allowed_file_types = Array.from(new Set([
+          ...(draft.file?.allowed_file_types || []),
+          SupportUploadFileTypes.image,
+          ...(isAllowVideoUpload ? [SupportUploadFileTypes.video] : []),
+        ]))
+      }
+      else {
+        draft.file!.allowed_file_types = draft.file!.allowed_file_types?.filter(
+          type => type !== SupportUploadFileTypes.image && (isAllowVideoUpload ? type !== SupportUploadFileTypes.video : true),
+        )
+      }
+
+      if (draft.file) {
+        draft.file.enabled = (draft.file.allowed_file_types?.length ?? 0) > 0
+        draft.file.image = {
+          ...(draft.file.image || {}),
+          enabled: value,
+        }
       }
     })
     setFeatures(newFeatures)
-  }, [featuresStore])
+  }, [featuresStore, isAllowVideoUpload])

   if (!isShowVisionConfig)
     return null
@@ -89,11 +101,8 @@ const ConfigVision: FC = () => {
         <ParamConfig />
         <div className='ml-1 mr-3 w-[1px] h-3.5 bg-divider-subtle'></div>
         <Switch
-          defaultValue={file?.enabled}
-          onChange={value => handleChange({
-            ...(file || {}),
-            enabled: value,
-          })}
+          defaultValue={isImageEnabled}
+          onChange={handleChange}
           size='md'
         />
       </div>
@@ -0,0 +1,78 @@
+'use client'
+import type { FC } from 'react'
+import React, { useCallback } from 'react'
+import { useTranslation } from 'react-i18next'
+import produce from 'immer'
+import { useContext } from 'use-context-selector'
+
+import { Document } from '@/app/components/base/icons/src/vender/features'
+import Tooltip from '@/app/components/base/tooltip'
+import ConfigContext from '@/context/debug-configuration'
+import { SupportUploadFileTypes } from '@/app/components/workflow/types'
+import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks'
+import Switch from '@/app/components/base/switch'
+
+const ConfigDocument: FC = () => {
+  const { t } = useTranslation()
+  const file = useFeatures(s => s.features.file)
+  const featuresStore = useFeaturesStore()
+  const { isShowDocumentConfig } = useContext(ConfigContext)
+
+  const isDocumentEnabled = file?.allowed_file_types?.includes(SupportUploadFileTypes.document) ?? false
+
+  const handleChange = useCallback((value: boolean) => {
+    const {
+      features,
+      setFeatures,
+    } = featuresStore!.getState()
+
+    const newFeatures = produce(features, (draft) => {
+      if (value) {
+        draft.file!.allowed_file_types = Array.from(new Set([
+          ...(draft.file?.allowed_file_types || []),
+          SupportUploadFileTypes.document,
+        ]))
+      }
+      else {
+        draft.file!.allowed_file_types = draft.file!.allowed_file_types?.filter(
+          type => type !== SupportUploadFileTypes.document,
+        )
+      }
+      if (draft.file)
+        draft.file.enabled = (draft.file.allowed_file_types?.length ?? 0) > 0
+    })
+    setFeatures(newFeatures)
+  }, [featuresStore])
+
+  if (!isShowDocumentConfig)
+    return null
+
+  return (
+    <div className='mt-2 flex items-center gap-2 p-2 rounded-xl border-t-[0.5px] border-l-[0.5px] bg-background-section-burn'>
+      <div className='shrink-0 p-1'>
+        <div className='p-1 rounded-lg border-[0.5px] border-divider-subtle shadow-xs bg-util-colors-indigo-indigo-600'>
+          <Document className='w-4 h-4 text-text-primary-on-surface' />
+        </div>
+      </div>
+      <div className='grow flex items-center'>
+        <div className='mr-1 text-text-secondary system-sm-semibold'>{t('appDebug.feature.documentUpload.title')}</div>
+        <Tooltip
+          popupContent={
+            <div className='w-[180px]' >
+              {t('appDebug.feature.documentUpload.description')}
+            </div>
+          }
+        />
+      </div>
+      <div className='shrink-0 flex items-center'>
+        <div className='ml-1 mr-3 w-[1px] h-3.5 bg-divider-subtle'></div>
+        <Switch
+          defaultValue={isDocumentEnabled}
+          onChange={handleChange}
+          size='md'
+        />
+      </div>
+    </div>
+  )
+}
+export default React.memo(ConfigDocument)
@@ -7,6 +7,7 @@ import { useFormattingChangedDispatcher } from '../debug/hooks'
 import DatasetConfig from '../dataset-config'
 import HistoryPanel from '../config-prompt/conversation-history/history-panel'
 import ConfigVision from '../config-vision'
+import ConfigDocument from './config-document'
 import AgentTools from './agent/agent-tools'
 import ConfigContext from '@/context/debug-configuration'
 import ConfigPrompt from '@/app/components/app/configuration/config-prompt'

@@ -82,6 +83,8 @@ const Config: FC = () => {

         <ConfigVision />

+        <ConfigDocument />
+
         {/* Chat History */}
         {isAdvancedMode && isChatApp && modelModeType === ModelModeType.completion && (
           <HistoryPanel
@@ -451,7 +451,8 @@ const Configuration: FC = () => {
   }

   const isShowVisionConfig = !!currModel?.features?.includes(ModelFeatureEnum.vision)
+  const isShowDocumentConfig = !!currModel?.features?.includes(ModelFeatureEnum.document)
+  const isAllowVideoUpload = !!currModel?.features?.includes(ModelFeatureEnum.video)
   // *** web app features ***
   const featuresData: FeaturesData = useMemo(() => {
     return {

@@ -472,7 +473,7 @@ const Configuration: FC = () => {
           transfer_methods: modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'],
         },
         enabled: !!(modelConfig.file_upload?.enabled || modelConfig.file_upload?.image?.enabled),
-        allowed_file_types: modelConfig.file_upload?.allowed_file_types || [SupportUploadFileTypes.image, SupportUploadFileTypes.video],
+        allowed_file_types: modelConfig.file_upload?.allowed_file_types || [],
         allowed_file_extensions: modelConfig.file_upload?.allowed_file_extensions || [...FILE_EXTS[SupportUploadFileTypes.image], ...FILE_EXTS[SupportUploadFileTypes.video]].map(ext => `.${ext}`),
         allowed_file_upload_methods: modelConfig.file_upload?.allowed_file_upload_methods || modelConfig.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'],
         number_limits: modelConfig.file_upload?.number_limits || modelConfig.file_upload?.image?.number_limits || 3,

@@ -861,6 +862,8 @@ const Configuration: FC = () => {
       isShowVisionConfig,
       visionConfig,
       setVisionConfig: handleSetVisionConfig,
+      isAllowVideoUpload,
+      isShowDocumentConfig,
       rerankSettingModalOpen,
       setRerankSettingModalOpen,
     }}
@@ -0,0 +1,3 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
+  <path d="M20 22H4C3.44772 22 3 21.5523 3 21V3C3 2.44772 3.44772 2 4 2H20C20.5523 2 21 2.44772 21 3V21C21 21.5523 20.5523 22 20 22ZM7 6V10H11V6H7ZM7 12V14H17V12H7ZM7 16V18H17V16H7ZM13 7V9H17V7H13Z"></path>
+</svg>
@@ -0,0 +1,23 @@
+{
+  "icon": {
+    "type": "element",
+    "isRootNode": true,
+    "name": "svg",
+    "attributes": {
+      "xmlns": "http://www.w3.org/2000/svg",
+      "viewBox": "0 0 24 24",
+      "fill": "currentColor"
+    },
+    "children": [
+      {
+        "type": "element",
+        "name": "path",
+        "attributes": {
+          "d": "M20 22H4C3.44772 22 3 21.5523 3 21V3C3 2.44772 3.44772 2 4 2H20C20.5523 2 21 2.44772 21 3V21C21 21.5523 20.5523 22 20 22ZM7 6V10H11V6H7ZM7 12V14H17V12H7ZM7 16V18H17V16H7ZM13 7V9H17V7H13Z"
+        },
+        "children": []
+      }
+    ]
+  },
+  "name": "Document"
+}
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './Document.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
+  props,
+  ref,
+) => <IconBase {...props} ref={ref} data={data as IconData} />)
+
+Icon.displayName = 'Document'
+
+export default Icon
@@ -7,3 +7,4 @@ export { default as Microphone01 } from './Microphone01'
 export { default as TextToAudio } from './TextToAudio'
 export { default as VirtualAssistant } from './VirtualAssistant'
 export { default as Vision } from './Vision'
+export { default as Document } from './Document'
@@ -52,6 +52,9 @@ export enum ModelFeatureEnum {
   multiToolCall = 'multi-tool-call',
   agentThought = 'agent-thought',
   vision = 'vision',
+  video = 'video',
+  document = 'document',
+  audio = 'audio',
 }

 export enum ModelFeatureTextEnum {

@@ -59,6 +62,9 @@ export enum ModelFeatureTextEnum {
   multiToolCall = 'Multi Tool Call',
   agentThought = 'Agent Thought',
   vision = 'Vision',
+  video = 'Video',
+  document = 'Document',
+  audio = 'Audio',
 }

 export enum ModelStatusEnum {
@@ -97,6 +97,8 @@ type IDebugConfiguration = {
   isShowVisionConfig: boolean
   visionConfig: VisionSettings
   setVisionConfig: (visionConfig: VisionSettings, noNotice?: boolean) => void
+  isAllowVideoUpload: boolean
+  isShowDocumentConfig: boolean
   rerankSettingModalOpen: boolean
   setRerankSettingModalOpen: (rerankSettingModalOpen: boolean) => void
 }

@@ -244,6 +246,8 @@ const DebugConfigurationContext = createContext<IDebugConfiguration>({
     transfer_methods: [TransferMethod.remote_url],
   },
   setVisionConfig: () => { },
+  isAllowVideoUpload: false,
+  isShowDocumentConfig: false,
   rerankSettingModalOpen: false,
   setRerankSettingModalOpen: () => { },
 })
@@ -218,6 +218,10 @@ const translation = {
       enableText: 'Features Enabled',
       manage: 'Manage',
     },
+    documentUpload: {
+      title: 'Document',
+      description: 'Enable Document will allows the model to take in documents and answer questions about them.',
+    },
   },
   codegen: {
     title: 'Code Generator',
@@ -218,6 +218,10 @@ const translation = {
      enableText: '功能已开启',
       manage: '管理',
     },
+    documentUpload: {
+      title: '文档',
+      description: '启用文档后,模型可以接收文档并回答关于它们的问题。',
+    },
   },
   codegen: {
     title: '代码生成器',