FEAT: NEW WORKFLOW ENGINE (#3160)
Co-authored-by: Joel <iamjoel007@gmail.com> Co-authored-by: Yeuoly <admin@srmxy.cn> Co-authored-by: JzoNg <jzongcode@gmail.com> Co-authored-by: StyleZhang <jasonapring2015@outlook.com> Co-authored-by: jyong <jyong@dify.ai> Co-authored-by: nite-knite <nkCoding@gmail.com> Co-authored-by: jyong <718720800@qq.com>
This commit is contained in:
@@ -66,4 +66,8 @@ JINA_API_KEY=
|
||||
OLLAMA_BASE_URL=
|
||||
|
||||
# Mock Switch
|
||||
MOCK_SWITCH=false
|
||||
MOCK_SWITCH=false
|
||||
|
||||
# CODE EXECUTION CONFIGURATION
|
||||
CODE_EXECUTION_ENDPOINT=
|
||||
CODE_EXECUTINO_API_KEY=
|
||||
@@ -1,10 +1,21 @@
|
||||
import pytest
|
||||
from core.tools.tool_manager import ToolManager
|
||||
|
||||
provider_generator = ToolManager.list_builtin_providers()
|
||||
provider_names = [provider.identity.name for provider in provider_generator]
|
||||
ToolManager.clear_builtin_providers_cache()
|
||||
provider_generator = ToolManager.list_builtin_providers()
|
||||
|
||||
def test_tool_providers():
|
||||
@pytest.mark.parametrize('name', provider_names)
|
||||
def test_tool_providers(benchmark, name):
|
||||
"""
|
||||
Test that all tool providers can be loaded
|
||||
"""
|
||||
providers = ToolManager.list_builtin_providers()
|
||||
for provider in providers:
|
||||
provider.get_tools()
|
||||
|
||||
def test(generator):
|
||||
try:
|
||||
return next(generator)
|
||||
except StopIteration:
|
||||
return None
|
||||
|
||||
benchmark.pedantic(test, args=(provider_generator,), iterations=1, rounds=1)
|
||||
0
api/tests/integration_tests/workflow/__init__.py
Normal file
0
api/tests/integration_tests/workflow/__init__.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import os
|
||||
import pytest
|
||||
|
||||
from typing import Literal
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
from core.helper.code_executor.code_executor import CodeExecutor
|
||||
|
||||
MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'
|
||||
|
||||
class MockedCodeExecutor:
|
||||
@classmethod
|
||||
def invoke(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict) -> dict:
|
||||
# invoke directly
|
||||
if language == 'python3':
|
||||
return {
|
||||
"result": 3
|
||||
}
|
||||
elif language == 'jinja2':
|
||||
return {
|
||||
"result": "3"
|
||||
}
|
||||
|
||||
@pytest.fixture
|
||||
def setup_code_executor_mock(request, monkeypatch: MonkeyPatch):
|
||||
if not MOCK:
|
||||
yield
|
||||
return
|
||||
|
||||
monkeypatch.setattr(CodeExecutor, "execute_code", MockedCodeExecutor.invoke)
|
||||
yield
|
||||
monkeypatch.undo()
|
||||
85
api/tests/integration_tests/workflow/nodes/__mock/http.py
Normal file
85
api/tests/integration_tests/workflow/nodes/__mock/http.py
Normal file
@@ -0,0 +1,85 @@
|
||||
import os
|
||||
import pytest
|
||||
import requests.api as requests
|
||||
import httpx._api as httpx
|
||||
from requests import Response as RequestsResponse
|
||||
from httpx import Request as HttpxRequest
|
||||
from yarl import URL
|
||||
|
||||
from typing import Literal
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
from json import dumps
|
||||
|
||||
MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'
|
||||
|
||||
class MockedHttp:
|
||||
def requests_request(method: Literal['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'], url: str,
|
||||
**kwargs) -> RequestsResponse:
|
||||
"""
|
||||
Mocked requests.request
|
||||
"""
|
||||
response = RequestsResponse()
|
||||
response.url = str(URL(url) % kwargs.get('params', {}))
|
||||
response.headers = kwargs.get('headers', {})
|
||||
|
||||
if url == 'http://404.com':
|
||||
response.status_code = 404
|
||||
response._content = b'Not Found'
|
||||
return response
|
||||
|
||||
# get data, files
|
||||
data = kwargs.get('data', None)
|
||||
files = kwargs.get('files', None)
|
||||
|
||||
if data is not None:
|
||||
resp = dumps(data).encode('utf-8')
|
||||
if files is not None:
|
||||
resp = dumps(files).encode('utf-8')
|
||||
else:
|
||||
resp = b'OK'
|
||||
|
||||
response.status_code = 200
|
||||
response._content = resp
|
||||
return response
|
||||
|
||||
def httpx_request(method: Literal['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'],
|
||||
url: str, **kwargs) -> httpx.Response:
|
||||
"""
|
||||
Mocked httpx.request
|
||||
"""
|
||||
response = httpx.Response(
|
||||
status_code=200,
|
||||
request=HttpxRequest(method, url)
|
||||
)
|
||||
response.headers = kwargs.get('headers', {})
|
||||
|
||||
if url == 'http://404.com':
|
||||
response.status_code = 404
|
||||
response.content = b'Not Found'
|
||||
return response
|
||||
|
||||
# get data, files
|
||||
data = kwargs.get('data', None)
|
||||
files = kwargs.get('files', None)
|
||||
|
||||
if data is not None:
|
||||
resp = dumps(data).encode('utf-8')
|
||||
if files is not None:
|
||||
resp = dumps(files).encode('utf-8')
|
||||
else:
|
||||
resp = b'OK'
|
||||
|
||||
response.status_code = 200
|
||||
response._content = resp
|
||||
return response
|
||||
|
||||
@pytest.fixture
|
||||
def setup_http_mock(request, monkeypatch: MonkeyPatch):
|
||||
if not MOCK:
|
||||
yield
|
||||
return
|
||||
|
||||
monkeypatch.setattr(requests, "request", MockedHttp.requests_request)
|
||||
monkeypatch.setattr(httpx, "request", MockedHttp.httpx_request)
|
||||
yield
|
||||
monkeypatch.undo()
|
||||
342
api/tests/integration_tests/workflow/nodes/test_code.py
Normal file
342
api/tests/integration_tests/workflow/nodes/test_code.py
Normal file
@@ -0,0 +1,342 @@
|
||||
import pytest
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.code.code_node import CodeNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
|
||||
|
||||
from os import getenv
|
||||
|
||||
CODE_MAX_STRING_LENGTH = int(getenv('CODE_MAX_STRING_LENGTH', '10000'))
|
||||
|
||||
@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
|
||||
def test_execute_code(setup_code_executor_mock):
|
||||
code = '''
|
||||
def main(args1: int, args2: int) -> dict:
|
||||
return {
|
||||
"result": args1 + args2,
|
||||
}
|
||||
'''
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = '\n'.join([line[4:] for line in code.split('\n')])
|
||||
node = CodeNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'outputs': {
|
||||
'result': {
|
||||
'type': 'number',
|
||||
},
|
||||
},
|
||||
'title': '123',
|
||||
'variables': [
|
||||
{
|
||||
'variable': 'args1',
|
||||
'value_selector': ['1', '123', 'args1'],
|
||||
},
|
||||
{
|
||||
'variable': 'args2',
|
||||
'value_selector': ['1', '123', 'args2']
|
||||
}
|
||||
],
|
||||
'answer': '123',
|
||||
'code_language': 'python3',
|
||||
'code': code
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value=1)
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args2'], value=2)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs['result'] == 3
|
||||
assert result.error is None
|
||||
|
||||
@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
|
||||
def test_execute_code_output_validator(setup_code_executor_mock):
|
||||
code = '''
|
||||
def main(args1: int, args2: int) -> dict:
|
||||
return {
|
||||
"result": args1 + args2,
|
||||
}
|
||||
'''
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = '\n'.join([line[4:] for line in code.split('\n')])
|
||||
node = CodeNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
'title': '123',
|
||||
'variables': [
|
||||
{
|
||||
'variable': 'args1',
|
||||
'value_selector': ['1', '123', 'args1'],
|
||||
},
|
||||
{
|
||||
'variable': 'args2',
|
||||
'value_selector': ['1', '123', 'args2']
|
||||
}
|
||||
],
|
||||
'answer': '123',
|
||||
'code_language': 'python3',
|
||||
'code': code
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value=1)
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args2'], value=2)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.FAILED
|
||||
assert result.error == 'result in output form must be a string'
|
||||
|
||||
def test_execute_code_output_validator_depth():
|
||||
code = '''
|
||||
def main(args1: int, args2: int) -> dict:
|
||||
return {
|
||||
"result": {
|
||||
"result": args1 + args2,
|
||||
}
|
||||
}
|
||||
'''
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = '\n'.join([line[4:] for line in code.split('\n')])
|
||||
node = CodeNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
"outputs": {
|
||||
"string_validator": {
|
||||
"type": "string",
|
||||
},
|
||||
"number_validator": {
|
||||
"type": "number",
|
||||
},
|
||||
"number_array_validator": {
|
||||
"type": "array[number]",
|
||||
},
|
||||
"string_array_validator": {
|
||||
"type": "array[string]",
|
||||
},
|
||||
"object_validator": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
},
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "number",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
'title': '123',
|
||||
'variables': [
|
||||
{
|
||||
'variable': 'args1',
|
||||
'value_selector': ['1', '123', 'args1'],
|
||||
},
|
||||
{
|
||||
'variable': 'args2',
|
||||
'value_selector': ['1', '123', 'args2']
|
||||
}
|
||||
],
|
||||
'answer': '123',
|
||||
'code_language': 'python3',
|
||||
'code': code
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"number_validator": 1,
|
||||
"string_validator": "1",
|
||||
"number_array_validator": [1, 2, 3, 3.333],
|
||||
"string_array_validator": ["1", "2", "3"],
|
||||
"object_validator": {
|
||||
"result": 1,
|
||||
"depth": {
|
||||
"depth": {
|
||||
"depth": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# validate
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"number_validator": "1",
|
||||
"string_validator": 1,
|
||||
"number_array_validator": ["1", "2", "3", "3.333"],
|
||||
"string_array_validator": [1, 2, 3],
|
||||
"object_validator": {
|
||||
"result": "1",
|
||||
"depth": {
|
||||
"depth": {
|
||||
"depth": "1"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# validate
|
||||
with pytest.raises(ValueError):
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"number_validator": 1,
|
||||
"string_validator": (CODE_MAX_STRING_LENGTH + 1) * "1",
|
||||
"number_array_validator": [1, 2, 3, 3.333],
|
||||
"string_array_validator": ["1", "2", "3"],
|
||||
"object_validator": {
|
||||
"result": 1,
|
||||
"depth": {
|
||||
"depth": {
|
||||
"depth": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# validate
|
||||
with pytest.raises(ValueError):
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"number_validator": 1,
|
||||
"string_validator": "1",
|
||||
"number_array_validator": [1, 2, 3, 3.333] * 2000,
|
||||
"string_array_validator": ["1", "2", "3"],
|
||||
"object_validator": {
|
||||
"result": 1,
|
||||
"depth": {
|
||||
"depth": {
|
||||
"depth": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# validate
|
||||
with pytest.raises(ValueError):
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
|
||||
def test_execute_code_output_object_list():
|
||||
code = '''
|
||||
def main(args1: int, args2: int) -> dict:
|
||||
return {
|
||||
"result": {
|
||||
"result": args1 + args2,
|
||||
}
|
||||
}
|
||||
'''
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = '\n'.join([line[4:] for line in code.split('\n')])
|
||||
node = CodeNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
"outputs": {
|
||||
"object_list": {
|
||||
"type": "array[object]",
|
||||
},
|
||||
},
|
||||
'title': '123',
|
||||
'variables': [
|
||||
{
|
||||
'variable': 'args1',
|
||||
'value_selector': ['1', '123', 'args1'],
|
||||
},
|
||||
{
|
||||
'variable': 'args2',
|
||||
'value_selector': ['1', '123', 'args2']
|
||||
}
|
||||
],
|
||||
'answer': '123',
|
||||
'code_language': 'python3',
|
||||
'code': code
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"object_list": [{
|
||||
"result": 1,
|
||||
}, {
|
||||
"result": 2,
|
||||
}, {
|
||||
"result": [1, 2, 3],
|
||||
}]
|
||||
}
|
||||
|
||||
# validate
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
"object_list": [{
|
||||
"result": 1,
|
||||
}, {
|
||||
"result": 2,
|
||||
}, {
|
||||
"result": [1, 2, 3],
|
||||
}, 1]
|
||||
}
|
||||
|
||||
# validate
|
||||
with pytest.raises(ValueError):
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
271
api/tests/integration_tests/workflow/nodes/test_http.py
Normal file
271
api/tests/integration_tests/workflow/nodes/test_http.py
Normal file
@@ -0,0 +1,271 @@
|
||||
import pytest
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.http_request.http_request_node import HttpRequestNode
|
||||
|
||||
from tests.integration_tests.workflow.nodes.__mock.http import setup_http_mock
|
||||
|
||||
BASIC_NODE_DATA = {
|
||||
'tenant_id': '1',
|
||||
'app_id': '1',
|
||||
'workflow_id': '1',
|
||||
'user_id': '1',
|
||||
'user_from': InvokeFrom.WEB_APP,
|
||||
}
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='a', variable_key_list=['b123', 'args1'], value=1)
|
||||
pool.append_variable(node_id='a', variable_key_list=['b123', 'args2'], value=2)
|
||||
|
||||
@pytest.mark.parametrize('setup_http_mock', [['none']], indirect=True)
|
||||
def test_get(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'get',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': None,
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert '?A=b' in data
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
|
||||
@pytest.mark.parametrize('setup_http_mock', [['none']], indirect=True)
|
||||
def test_no_auth(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'get',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'no-auth',
|
||||
'config': None,
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': None,
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert '?A=b' in data
|
||||
assert 'X-Header: 123' in data
|
||||
|
||||
@pytest.mark.parametrize('setup_http_mock', [['none']], indirect=True)
|
||||
def test_custom_authorization_header(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'get',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'custom',
|
||||
'api_key': 'Auth',
|
||||
'header': 'X-Auth',
|
||||
},
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': None,
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert '?A=b' in data
|
||||
assert 'X-Header: 123' in data
|
||||
assert 'X-Auth: Auth' in data
|
||||
|
||||
@pytest.mark.parametrize('setup_http_mock', [['none']], indirect=True)
|
||||
def test_template(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'get',
|
||||
'url': 'http://example.com/{{#a.b123.args2#}}',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123\nX-Header2:{{#a.b123.args2#}}',
|
||||
'params': 'A:b\nTemplate:{{#a.b123.args2#}}',
|
||||
'body': None,
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert '?A=b' in data
|
||||
assert 'Template=2' in data
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
assert 'X-Header2: 2' in data
|
||||
|
||||
@pytest.mark.parametrize('setup_http_mock', [['none']], indirect=True)
|
||||
def test_json(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'post',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': {
|
||||
'type': 'json',
|
||||
'data': '{"a": "{{#a.b123.args1#}}"}'
|
||||
},
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert '{"a": "1"}' in data
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
|
||||
def test_x_www_form_urlencoded(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'post',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': {
|
||||
'type': 'x-www-form-urlencoded',
|
||||
'data': 'a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}'
|
||||
},
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert 'a=1&b=2' in data
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
|
||||
def test_form_data(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'post',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': {
|
||||
'type': 'form-data',
|
||||
'data': 'a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}'
|
||||
},
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert 'form-data; name="a"' in data
|
||||
assert '1' in data
|
||||
assert 'form-data; name="b"' in data
|
||||
assert '2' in data
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
|
||||
def test_none_data(setup_http_mock):
|
||||
node = HttpRequestNode(config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'http',
|
||||
'desc': '',
|
||||
'method': 'post',
|
||||
'url': 'http://example.com',
|
||||
'authorization': {
|
||||
'type': 'api-key',
|
||||
'config': {
|
||||
'type': 'basic',
|
||||
'api_key':'ak-xxx',
|
||||
'header': 'api-key',
|
||||
}
|
||||
},
|
||||
'headers': 'X-Header:123',
|
||||
'params': 'A:b',
|
||||
'body': {
|
||||
'type': 'none',
|
||||
'data': '123123123'
|
||||
},
|
||||
}
|
||||
}, **BASIC_NODE_DATA)
|
||||
|
||||
result = node.run(pool)
|
||||
data = result.process_data.get('request', '')
|
||||
|
||||
assert 'api-key: Basic ak-xxx' in data
|
||||
assert 'X-Header: 123' in data
|
||||
assert '123123123' not in data
|
||||
117
api/tests/integration_tests/workflow/nodes/test_llm.py
Normal file
117
api/tests/integration_tests/workflow/nodes/test_llm.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import os
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
|
||||
from core.entities.provider_configuration import ProviderModelBundle, ProviderConfiguration
|
||||
from core.entities.provider_entities import SystemConfiguration, CustomConfiguration, CustomProviderConfiguration
|
||||
from core.model_manager import ModelInstance
|
||||
from core.model_runtime.entities.model_entities import ModelType
|
||||
from core.model_runtime.model_providers import ModelProviderFactory
|
||||
from core.workflow.entities.node_entities import SystemVariable
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.nodes.llm.llm_node import LLMNode
|
||||
from extensions.ext_database import db
|
||||
from models.provider import ProviderType
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
|
||||
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
|
||||
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
|
||||
def test_execute_llm(setup_openai_mock):
|
||||
node = LLMNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
config={
|
||||
'id': 'llm',
|
||||
'data': {
|
||||
'title': '123',
|
||||
'type': 'llm',
|
||||
'model': {
|
||||
'provider': 'openai',
|
||||
'name': 'gpt-3.5-turbo',
|
||||
'mode': 'chat',
|
||||
'completion_params': {}
|
||||
},
|
||||
'prompt_template': [
|
||||
{
|
||||
'role': 'system',
|
||||
'text': 'you are a helpful assistant.\ntoday\'s weather is {{#abc.output#}}.'
|
||||
},
|
||||
{
|
||||
'role': 'user',
|
||||
'text': '{{#sys.query#}}'
|
||||
}
|
||||
],
|
||||
'memory': None,
|
||||
'context': {
|
||||
'enabled': False
|
||||
},
|
||||
'vision': {
|
||||
'enabled': False
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={
|
||||
SystemVariable.QUERY: 'what\'s the weather today?',
|
||||
SystemVariable.FILES: [],
|
||||
SystemVariable.CONVERSATION: 'abababa'
|
||||
}, user_inputs={})
|
||||
pool.append_variable(node_id='abc', variable_key_list=['output'], value='sunny')
|
||||
|
||||
credentials = {
|
||||
'openai_api_key': os.environ.get('OPENAI_API_KEY')
|
||||
}
|
||||
|
||||
provider_instance = ModelProviderFactory().get_provider_instance('openai')
|
||||
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
|
||||
provider_model_bundle = ProviderModelBundle(
|
||||
configuration=ProviderConfiguration(
|
||||
tenant_id='1',
|
||||
provider=provider_instance.get_provider_schema(),
|
||||
preferred_provider_type=ProviderType.CUSTOM,
|
||||
using_provider_type=ProviderType.CUSTOM,
|
||||
system_configuration=SystemConfiguration(
|
||||
enabled=False
|
||||
),
|
||||
custom_configuration=CustomConfiguration(
|
||||
provider=CustomProviderConfiguration(
|
||||
credentials=credentials
|
||||
)
|
||||
)
|
||||
),
|
||||
provider_instance=provider_instance,
|
||||
model_type_instance=model_type_instance
|
||||
)
|
||||
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model='gpt-3.5-turbo')
|
||||
model_config = ModelConfigWithCredentialsEntity(
|
||||
model='gpt-3.5-turbo',
|
||||
provider='openai',
|
||||
mode='chat',
|
||||
credentials=credentials,
|
||||
parameters={},
|
||||
model_schema=model_type_instance.get_model_schema('gpt-3.5-turbo'),
|
||||
provider_model_bundle=provider_model_bundle
|
||||
)
|
||||
|
||||
# Mock db.session.close()
|
||||
db.session.close = MagicMock()
|
||||
|
||||
node._fetch_model_config = MagicMock(return_value=tuple([model_instance, model_config]))
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs['text'] is not None
|
||||
assert result.outputs['usage']['total_tokens'] > 0
|
||||
@@ -0,0 +1,46 @@
|
||||
import pytest
|
||||
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
|
||||
|
||||
@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
|
||||
def test_execute_code(setup_code_executor_mock):
|
||||
code = '''{{args2}}'''
|
||||
node = TemplateTransformNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=UserFrom.END_USER,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': '123',
|
||||
'variables': [
|
||||
{
|
||||
'variable': 'args1',
|
||||
'value_selector': ['1', '123', 'args1'],
|
||||
},
|
||||
{
|
||||
'variable': 'args2',
|
||||
'value_selector': ['1', '123', 'args2']
|
||||
}
|
||||
],
|
||||
'template': code,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value=1)
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args2'], value=3)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs['output'] == '3'
|
||||
81
api/tests/integration_tests/workflow/nodes/test_tool.py
Normal file
81
api/tests/integration_tests/workflow/nodes/test_tool.py
Normal file
@@ -0,0 +1,81 @@
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.tool.tool_node import ToolNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
|
||||
def test_tool_variable_invoke():
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value='1+1')
|
||||
|
||||
node = ToolNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'a',
|
||||
'desc': 'a',
|
||||
'provider_id': 'maths',
|
||||
'provider_type': 'builtin',
|
||||
'provider_name': 'maths',
|
||||
'tool_name': 'eval_expression',
|
||||
'tool_label': 'eval_expression',
|
||||
'tool_configurations': {},
|
||||
'tool_parameters': {
|
||||
'expression': {
|
||||
'type': 'variable',
|
||||
'value': ['1', '123', 'args1'],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert '2' in result.outputs['text']
|
||||
assert result.outputs['files'] == []
|
||||
|
||||
def test_tool_mixed_invoke():
|
||||
pool = VariablePool(system_variables={}, user_inputs={})
|
||||
pool.append_variable(node_id='1', variable_key_list=['args1'], value='1+1')
|
||||
|
||||
node = ToolNode(
|
||||
tenant_id='1',
|
||||
app_id='1',
|
||||
workflow_id='1',
|
||||
user_id='1',
|
||||
user_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
'id': '1',
|
||||
'data': {
|
||||
'title': 'a',
|
||||
'desc': 'a',
|
||||
'provider_id': 'maths',
|
||||
'provider_type': 'builtin',
|
||||
'provider_name': 'maths',
|
||||
'tool_name': 'eval_expression',
|
||||
'tool_label': 'eval_expression',
|
||||
'tool_configurations': {},
|
||||
'tool_parameters': {
|
||||
'expression': {
|
||||
'type': 'mixed',
|
||||
'value': '{{#1.args1#}}',
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert '2' in result.outputs['text']
|
||||
assert result.outputs['files'] == []
|
||||
1
api/tests/unit_tests/.gitignore
vendored
Normal file
1
api/tests/unit_tests/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
.env.test
|
||||
0
api/tests/unit_tests/__init__.py
Normal file
0
api/tests/unit_tests/__init__.py
Normal file
7
api/tests/unit_tests/conftest.py
Normal file
7
api/tests/unit_tests/conftest.py
Normal file
@@ -0,0 +1,7 @@
|
||||
import os
|
||||
|
||||
# Getting the absolute path of the current file's directory
|
||||
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# Getting the absolute path of the project's root directory
|
||||
PROJECT_DIR = os.path.abspath(os.path.join(ABS_PATH, os.pardir, os.pardir))
|
||||
0
api/tests/unit_tests/core/__init__.py
Normal file
0
api/tests/unit_tests/core/__init__.py
Normal file
0
api/tests/unit_tests/core/prompt/__init__.py
Normal file
0
api/tests/unit_tests/core/prompt/__init__.py
Normal file
@@ -0,0 +1,211 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from core.app.app_config.entities import ModelConfigEntity, FileExtraConfig
|
||||
from core.file.file_obj import FileVar, FileType, FileTransferMethod
|
||||
from core.memory.token_buffer_memory import TokenBufferMemory
|
||||
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage, PromptMessageRole
|
||||
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
|
||||
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig, ChatModelMessage
|
||||
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
|
||||
from models.model import Conversation
|
||||
|
||||
|
||||
def test__get_completion_model_prompt_messages():
    """Completion-model prompt assembly renders context, histories and inputs into a single message."""
    mocked_model_config = MagicMock(spec=ModelConfigEntity)
    mocked_model_config.provider = 'openai'
    mocked_model_config.model = 'gpt-3.5-turbo-instruct'

    template_text = "Context:\n{{#context#}}\n\nHistories:\n{{#histories#}}\n\nyou are {{name}}."
    template_config = CompletionModelPromptTemplate(text=template_text)

    mem_config = MemoryConfig(
        role_prefix=MemoryConfig.RolePrefix(user="Human", assistant="Assistant"),
        window=MemoryConfig.WindowConfig(enabled=False),
    )

    user_inputs = {"name": "John"}
    attached_files = []
    context_text = "I am superman."

    memory = TokenBufferMemory(conversation=Conversation(), model_instance=mocked_model_config)
    history = [
        UserPromptMessage(content="Hi"),
        AssistantPromptMessage(content="Hello"),
    ]
    memory.get_history_prompt_messages = MagicMock(return_value=history)

    transform = AdvancedPromptTransform()
    transform._calculate_rest_token = MagicMock(return_value=2000)
    prompt_messages = transform._get_completion_model_prompt_messages(
        prompt_template=template_config,
        inputs=user_inputs,
        query=None,
        files=attached_files,
        context=context_text,
        memory_config=mem_config,
        memory=memory,
        model_config=mocked_model_config,
    )

    # A completion model gets exactly one flattened prompt message.
    assert len(prompt_messages) == 1
    expected = PromptTemplateParser(template=template_text).format({
        "#context#": context_text,
        "#histories#": "\n".join([f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: "
                                  f"{prompt.content}" for prompt in history]),
        **user_inputs,
    })
    assert prompt_messages[0].content == expected
|
||||
|
||||
|
||||
def test__get_chat_model_prompt_messages(get_chat_model_args):
    """With memory enabled, the chat prompt is system + template turns + history + current query."""
    model_config_mock, memory_config, messages, inputs, context = get_chat_model_args

    query_text = "Hi2."

    memory = TokenBufferMemory(conversation=Conversation(), model_instance=model_config_mock)
    history = [
        UserPromptMessage(content="Hi1."),
        AssistantPromptMessage(content="Hello1!"),
    ]
    memory.get_history_prompt_messages = MagicMock(return_value=history)

    transform = AdvancedPromptTransform()
    transform._calculate_rest_token = MagicMock(return_value=2000)
    prompt_messages = transform._get_chat_model_prompt_messages(
        prompt_template=messages,
        inputs=inputs,
        query=query_text,
        files=[],
        context=context,
        memory_config=memory_config,
        memory=memory,
        model_config=model_config_mock,
    )

    # 3 template messages + 2 history turns + 1 query = 6 messages.
    assert len(prompt_messages) == 6
    assert prompt_messages[0].role == PromptMessageRole.SYSTEM
    assert prompt_messages[0].content == PromptTemplateParser(
        template=messages[0].text
    ).format({**inputs, "#context#": context})
    assert prompt_messages[5].content == query_text
|
||||
|
||||
|
||||
def test__get_chat_model_prompt_messages_no_memory(get_chat_model_args):
    """Without memory or query, only the configured template messages come back."""
    model_config_mock, _, messages, inputs, context = get_chat_model_args

    transform = AdvancedPromptTransform()
    transform._calculate_rest_token = MagicMock(return_value=2000)
    prompt_messages = transform._get_chat_model_prompt_messages(
        prompt_template=messages,
        inputs=inputs,
        query=None,
        files=[],
        context=context,
        memory_config=None,
        memory=None,
        model_config=model_config_mock,
    )

    # Exactly the three configured template messages remain.
    assert len(prompt_messages) == 3
    assert prompt_messages[0].role == PromptMessageRole.SYSTEM
    assert prompt_messages[0].content == PromptTemplateParser(
        template=messages[0].text
    ).format({**inputs, "#context#": context})
|
||||
|
||||
|
||||
def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_args):
    """An attached image becomes an extra multimodal content item on the final user message."""
    model_config_mock, _, messages, inputs, context = get_chat_model_args

    attached_files = [
        FileVar(
            id="file1",
            tenant_id="tenant1",
            type=FileType.IMAGE,
            transfer_method=FileTransferMethod.REMOTE_URL,
            url="https://example.com/image1.jpg",
            extra_config=FileExtraConfig(
                image_config={
                    "detail": "high",
                }
            ),
        )
    ]

    transform = AdvancedPromptTransform()
    transform._calculate_rest_token = MagicMock(return_value=2000)
    prompt_messages = transform._get_chat_model_prompt_messages(
        prompt_template=messages,
        inputs=inputs,
        query=None,
        files=attached_files,
        context=context,
        memory_config=None,
        memory=None,
        model_config=model_config_mock,
    )

    assert len(prompt_messages) == 4
    assert prompt_messages[0].role == PromptMessageRole.SYSTEM
    assert prompt_messages[0].content == PromptTemplateParser(
        template=messages[0].text
    ).format({**inputs, "#context#": context})
    # Last message carries [text, image]; the image item points at the file URL.
    assert isinstance(prompt_messages[3].content, list)
    assert len(prompt_messages[3].content) == 2
    assert prompt_messages[3].content[1].data == attached_files[0].url
|
||||
|
||||
|
||||
@pytest.fixture
def get_chat_model_args():
    """Shared chat-model arguments: (model_config, memory_config, messages, inputs, context)."""
    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = 'gpt-4'

    memory_config = MemoryConfig(
        window=MemoryConfig.WindowConfig(enabled=False)
    )

    # System prompt with a custom variable and a context placeholder, plus one chat turn.
    message_specs = [
        ("You are a helpful assistant named {{name}}.\n\nContext:\n{{#context#}}", PromptMessageRole.SYSTEM),
        ("Hi.", PromptMessageRole.USER),
        ("Hello!", PromptMessageRole.ASSISTANT),
    ]
    prompt_messages = [ChatModelMessage(text=text, role=role) for text, role in message_specs]

    return model_config_mock, memory_config, prompt_messages, {"name": "John"}, "I am superman."
|
||||
47
api/tests/unit_tests/core/prompt/test_prompt_transform.py
Normal file
47
api/tests/unit_tests/core/prompt/test_prompt_transform.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.app.app_config.entities import ModelConfigEntity
|
||||
from core.entities.provider_configuration import ProviderModelBundle
|
||||
from core.model_runtime.entities.message_entities import UserPromptMessage
|
||||
from core.model_runtime.entities.model_entities import ModelPropertyKey, AIModelEntity, ParameterRule
|
||||
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
|
||||
from core.prompt.prompt_transform import PromptTransform
|
||||
|
||||
|
||||
def test__calculate_rest_token():
    """Remaining budget = context_size - max_tokens - prompt tokens (62 - 50 - 6 = 6)."""
    max_tokens_rule = MagicMock(spec=ParameterRule)
    max_tokens_rule.name = 'max_tokens'

    schema_mock = MagicMock(spec=AIModelEntity)
    schema_mock.parameter_rules = [max_tokens_rule]
    schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}

    llm_mock = MagicMock(spec=LargeLanguageModel)
    llm_mock.get_num_tokens.return_value = 6

    bundle_mock = MagicMock(spec=ProviderModelBundle)
    bundle_mock.model_type_instance = llm_mock

    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.model = 'gpt-4'
    model_config_mock.credentials = {}
    model_config_mock.parameters = {'max_tokens': 50}
    model_config_mock.model_schema = schema_mock
    model_config_mock.provider_model_bundle = bundle_mock

    messages = [UserPromptMessage(content="Hello, how are you?")]
    rest_tokens = PromptTransform()._calculate_rest_token(messages, model_config_mock)

    # Validate against the mocked configuration rather than a magic constant alone.
    expected_rest_tokens = (schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
                            - model_config_mock.parameters['max_tokens']
                            - llm_mock.get_num_tokens.return_value)
    assert rest_tokens == expected_rest_tokens
    assert rest_tokens == 6
|
||||
248
api/tests/unit_tests/core/prompt/test_simple_prompt_transform.py
Normal file
248
api/tests/unit_tests/core/prompt/test_simple_prompt_transform.py
Normal file
@@ -0,0 +1,248 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
|
||||
from core.memory.token_buffer_memory import TokenBufferMemory
|
||||
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage
|
||||
from core.prompt.simple_prompt_transform import SimplePromptTransform
|
||||
from models.model import AppMode, Conversation
|
||||
|
||||
|
||||
def test_get_common_chat_app_prompt_template_with_pcqm():
    """pre-prompt + context + query + memory (pcqm) for a common chat model."""
    pre_prompt = "You are a helpful assistant."
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.CHAT,
        provider="openai",
        model="gpt-4",
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=True,
    )
    rules = template['prompt_rules']
    expected = (rules['context_prompt']
                + pre_prompt + '\n'
                + rules['histories_prompt']
                + rules['query_prompt'])
    assert template['prompt_template'].template == expected
    assert template['special_variable_keys'] == ['#context#', '#histories#', '#query#']
|
||||
|
||||
|
||||
def test_get_baichuan_chat_app_prompt_template_with_pcqm():
    """Same pcqm assembly, but through the Baichuan-specific prompt rules."""
    pre_prompt = "You are a helpful assistant."
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.CHAT,
        provider="baichuan",
        model="Baichuan2-53B",
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=True,
    )
    rules = template['prompt_rules']
    expected = (rules['context_prompt']
                + pre_prompt + '\n'
                + rules['histories_prompt']
                + rules['query_prompt'])
    assert template['prompt_template'].template == expected
    assert template['special_variable_keys'] == ['#context#', '#histories#', '#query#']
|
||||
|
||||
|
||||
def test_get_common_completion_app_prompt_template_with_pcq():
    """pre-prompt + context + query (no memory) for a common model in workflow mode."""
    pre_prompt = "You are a helpful assistant."
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.WORKFLOW,
        provider="openai",
        model="gpt-4",
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=False,
    )
    rules = template['prompt_rules']
    assert template['prompt_template'].template == (rules['context_prompt']
                                                    + pre_prompt + '\n'
                                                    + rules['query_prompt'])
    assert template['special_variable_keys'] == ['#context#', '#query#']
|
||||
|
||||
|
||||
def test_get_baichuan_completion_app_prompt_template_with_pcq():
    """pre-prompt + context + query (no memory) through the Baichuan prompt rules.

    Fix: dropped a leftover debug ``print()`` of the rendered template that
    polluted test output.
    """
    prompt_transform = SimplePromptTransform()
    pre_prompt = "You are a helpful assistant."
    prompt_template = prompt_transform.get_prompt_template(
        app_mode=AppMode.WORKFLOW,
        provider="baichuan",
        model="Baichuan2-53B",
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=False,
    )
    prompt_rules = prompt_template['prompt_rules']
    assert prompt_template['prompt_template'].template == (prompt_rules['context_prompt']
                                                           + pre_prompt + '\n'
                                                           + prompt_rules['query_prompt'])
    assert prompt_template['special_variable_keys'] == ['#context#', '#query#']
|
||||
|
||||
|
||||
def test_get_common_chat_app_prompt_template_with_q():
    """Query only: empty pre-prompt, no context, no memory."""
    pre_prompt = ""
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.CHAT,
        provider="openai",
        model="gpt-4",
        pre_prompt=pre_prompt,
        has_context=False,
        query_in_prompt=True,
        with_memory_prompt=False,
    )
    rules = template['prompt_rules']
    assert template['prompt_template'].template == rules['query_prompt']
    assert template['special_variable_keys'] == ['#query#']
|
||||
|
||||
|
||||
def test_get_common_chat_app_prompt_template_with_cq():
    """Context + query, with an empty pre-prompt and no memory."""
    pre_prompt = ""
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.CHAT,
        provider="openai",
        model="gpt-4",
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=False,
    )
    rules = template['prompt_rules']
    assert template['prompt_template'].template == (rules['context_prompt']
                                                    + rules['query_prompt'])
    assert template['special_variable_keys'] == ['#context#', '#query#']
|
||||
|
||||
|
||||
def test_get_common_chat_app_prompt_template_with_p():
    """Pre-prompt only: custom variables are extracted, no special variables."""
    pre_prompt = "you are {{name}}"
    template = SimplePromptTransform().get_prompt_template(
        app_mode=AppMode.CHAT,
        provider="openai",
        model="gpt-4",
        pre_prompt=pre_prompt,
        has_context=False,
        query_in_prompt=False,
        with_memory_prompt=False,
    )
    assert template['prompt_template'].template == pre_prompt + '\n'
    assert template['custom_variable_keys'] == ['name']
    assert template['special_variable_keys'] == []
|
||||
|
||||
|
||||
def test__get_chat_model_prompt_messages():
    """System prompt + mocked history + query are assembled in that order."""
    model_config_mock = MagicMock(spec=ModelConfigWithCredentialsEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = 'gpt-4'

    history = [
        UserPromptMessage(content="Hi"),
        AssistantPromptMessage(content="Hello"),
    ]
    memory_mock = MagicMock(spec=TokenBufferMemory)
    memory_mock.get_history_prompt_messages.return_value = history

    transform = SimplePromptTransform()
    transform._calculate_rest_token = MagicMock(return_value=2000)

    pre_prompt = "You are a helpful assistant {{name}}."
    inputs = {"name": "John"}
    context = "yes or no."
    query = "How are you?"
    prompt_messages, _ = transform._get_chat_model_prompt_messages(
        app_mode=AppMode.CHAT,
        pre_prompt=pre_prompt,
        inputs=inputs,
        query=query,
        files=[],
        context=context,
        memory=memory_mock,
        model_config=model_config_mock,
    )

    # Recreate the expected system prompt from the same template the transform uses.
    prompt_template = transform.get_prompt_template(
        app_mode=AppMode.CHAT,
        provider=model_config_mock.provider,
        model=model_config_mock.model,
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=False,
        with_memory_prompt=False,
    )
    real_system_prompt = prompt_template['prompt_template'].format({**inputs, '#context#': context})

    assert len(prompt_messages) == 4
    assert prompt_messages[0].content == real_system_prompt
    assert prompt_messages[1].content == history[0].content
    assert prompt_messages[2].content == history[1].content
    assert prompt_messages[3].content == query
|
||||
|
||||
|
||||
def test__get_completion_model_prompt_messages():
    """Completion-mode transform flattens context, history and query into one prompt.

    Fix: replaced the verbose ``x['k'] if 'k' in x else default`` lookups with
    the idiomatic, behavior-identical ``dict.get(k, default)``.
    """
    model_config_mock = MagicMock(spec=ModelConfigWithCredentialsEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = 'gpt-3.5-turbo-instruct'

    memory = TokenBufferMemory(
        conversation=Conversation(),
        model_instance=model_config_mock
    )

    history_prompt_messages = [
        UserPromptMessage(content="Hi"),
        AssistantPromptMessage(content="Hello")
    ]
    memory.get_history_prompt_messages = MagicMock(return_value=history_prompt_messages)

    prompt_transform = SimplePromptTransform()
    prompt_transform._calculate_rest_token = MagicMock(return_value=2000)
    pre_prompt = "You are a helpful assistant {{name}}."
    inputs = {
        "name": "John"
    }
    context = "yes or no."
    query = "How are you?"
    prompt_messages, stops = prompt_transform._get_completion_model_prompt_messages(
        app_mode=AppMode.CHAT,
        pre_prompt=pre_prompt,
        inputs=inputs,
        query=query,
        files=[],
        context=context,
        memory=memory,
        model_config=model_config_mock
    )

    # Rebuild the expected prompt through the same template/rules pipeline.
    prompt_template = prompt_transform.get_prompt_template(
        app_mode=AppMode.CHAT,
        provider=model_config_mock.provider,
        model=model_config_mock.model,
        pre_prompt=pre_prompt,
        has_context=True,
        query_in_prompt=True,
        with_memory_prompt=True,
    )

    prompt_rules = prompt_template['prompt_rules']
    # NOTE(review): ai_prefix is fed from 'human_prefix' and human_prefix from
    # 'assistant_prefix'. The names look cross-wired; kept as-is because it may
    # mirror the transform's own call — confirm against SimplePromptTransform.
    full_inputs = {**inputs, '#context#': context, '#query#': query, '#histories#': memory.get_history_prompt_text(
        max_token_limit=2000,
        ai_prefix=prompt_rules.get('human_prefix', 'Human'),
        human_prefix=prompt_rules.get('assistant_prefix', 'Assistant')
    )}
    real_prompt = prompt_template['prompt_template'].format(full_inputs)

    assert len(prompt_messages) == 1
    assert stops == prompt_rules.get('stops')
    assert prompt_messages[0].content == real_prompt
|
||||
0
api/tests/unit_tests/core/workflow/__init__.py
Normal file
0
api/tests/unit_tests/core/workflow/__init__.py
Normal file
42
api/tests/unit_tests/core/workflow/nodes/test_answer.py
Normal file
42
api/tests/unit_tests/core/workflow/nodes/test_answer.py
Normal file
@@ -0,0 +1,42 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.workflow.entities.node_entities import SystemVariable
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.answer.answer_node import AnswerNode
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from extensions.ext_database import db
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
|
||||
|
||||
def test_execute_answer():
    """Answer node substitutes known variables and leaves unknown placeholders untouched."""
    node = AnswerNode(
        tenant_id='1',
        app_id='1',
        workflow_id='1',
        user_id='1',
        user_from=UserFrom.ACCOUNT,
        config={
            'id': 'answer',
            'data': {
                'title': '123',
                'type': 'answer',
                'answer': 'Today\'s weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.'
            }
        }
    )

    # Populate the variable pool with the two referenced node outputs.
    pool = VariablePool(system_variables={SystemVariable.FILES: []}, user_inputs={})
    for source_node, key, value in [
        ('start', 'weather', 'sunny'),
        ('llm', 'text', 'You are a helpful AI.'),
    ]:
        pool.append_variable(node_id=source_node, variable_key_list=[key], value=value)

    db.session.close = MagicMock()  # keep the node run away from a real database session

    result = node._run(pool)

    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    # {{img}} is not a node-variable reference, so it survives verbatim.
    assert result.outputs['answer'] == "Today's weather is sunny\nYou are a helpful AI.\n{{img}}\nFin."
|
||||
193
api/tests/unit_tests/core/workflow/nodes/test_if_else.py
Normal file
193
api/tests/unit_tests/core/workflow/nodes/test_if_else.py
Normal file
@@ -0,0 +1,193 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.workflow.entities.node_entities import SystemVariable
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.nodes.if_else.if_else_node import IfElseNode
|
||||
from extensions.ext_database import db
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
|
||||
|
||||
def test_execute_if_else_result_true():
    """Every supported comparison operator matches, so AND over all conditions is True."""
    # (operator, variable key, comparison value) for all value-bearing operators;
    # 'null' / 'not null' take no value and are appended separately below.
    node = IfElseNode(
        tenant_id='1',
        app_id='1',
        workflow_id='1',
        user_id='1',
        user_from=UserFrom.ACCOUNT,
        config={
            'id': 'if-else',
            'data': {
                'title': '123',
                'type': 'if-else',
                'logical_operator': 'and',
                'conditions': [
                    {'comparison_operator': op, 'variable_selector': ['start', key], 'value': val}
                    for op, key, val in [
                        ('contains', 'array_contains', 'ab'),
                        ('not contains', 'array_not_contains', 'ab'),
                        ('contains', 'contains', 'ab'),
                        ('not contains', 'not_contains', 'ab'),
                        ('start with', 'start_with', 'ab'),
                        ('end with', 'end_with', 'ab'),
                        ('is', 'is', 'ab'),
                        ('is not', 'is_not', 'ab'),
                        ('empty', 'empty', 'ab'),
                        ('not empty', 'not_empty', 'ab'),
                        ('=', 'equals', '22'),
                        ('≠', 'not_equals', '22'),
                        ('>', 'greater_than', '22'),
                        ('<', 'less_than', '22'),
                        ('≥', 'greater_than_or_equal', '22'),
                        ('≤', 'less_than_or_equal', '22'),
                    ]
                ] + [
                    {'comparison_operator': 'null', 'variable_selector': ['start', 'null']},
                    {'comparison_operator': 'not null', 'variable_selector': ['start', 'not_null']},
                ]
            }
        }
    )

    # Variable values chosen so that every condition above evaluates truthy.
    # 'null' is deliberately never appended, so the 'null' check passes too.
    pool = VariablePool(system_variables={SystemVariable.FILES: []}, user_inputs={})
    for key, value in {
        'array_contains': ['ab', 'def'],
        'array_not_contains': ['ac', 'def'],
        'contains': 'cabcde',
        'not_contains': 'zacde',
        'start_with': 'abc',
        'end_with': 'zzab',
        'is': 'ab',
        'is_not': 'aab',
        'empty': '',
        'not_empty': 'aaa',
        'equals': 22,
        'not_equals': 23,
        'greater_than': 23,
        'less_than': 21,
        'greater_than_or_equal': 22,
        'less_than_or_equal': 21,
        'not_null': '1212',
    }.items():
        pool.append_variable(node_id='start', variable_key_list=[key], value=value)

    db.session.close = MagicMock()  # keep the node run away from a real database session

    result = node._run(pool)

    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    assert result.outputs['result'] is True
|
||||
|
||||
|
||||
def test_execute_if_else_result_false():
    """An OR of two failing conditions evaluates to False."""
    node = IfElseNode(
        tenant_id='1',
        app_id='1',
        workflow_id='1',
        user_id='1',
        user_from=UserFrom.ACCOUNT,
        config={
            'id': 'if-else',
            'data': {
                'title': '123',
                'type': 'if-else',
                'logical_operator': 'or',
                'conditions': [
                    {
                        'comparison_operator': 'contains',
                        'variable_selector': ['start', 'array_contains'],
                        'value': 'ab'
                    },
                    {
                        'comparison_operator': 'not contains',
                        'variable_selector': ['start', 'array_not_contains'],
                        'value': 'ab'
                    }
                ]
            }
        }
    )

    # Both branches fail: 'ab' is absent from the first array but present in the second.
    pool = VariablePool(system_variables={SystemVariable.FILES: []}, user_inputs={})
    for key, value in {
        'array_contains': ['1ab', 'def'],
        'array_not_contains': ['ab', 'def'],
    }.items():
        pool.append_variable(node_id='start', variable_key_list=[key], value=value)

    db.session.close = MagicMock()  # keep the node run away from a real database session

    result = node._run(pool)

    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    assert result.outputs['result'] is False
|
||||
0
api/tests/unit_tests/services/__init__.py
Normal file
0
api/tests/unit_tests/services/__init__.py
Normal file
0
api/tests/unit_tests/services/workflow/__init__.py
Normal file
0
api/tests/unit_tests/services/workflow/__init__.py
Normal file
@@ -0,0 +1,462 @@
|
||||
# test for api/services/workflow/workflow_converter.py
|
||||
import json
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from core.app.app_config.entities import VariableEntity, ExternalDataVariableEntity, DatasetEntity, \
|
||||
DatasetRetrieveConfigEntity, ModelConfigEntity, PromptTemplateEntity, AdvancedChatPromptTemplateEntity, \
|
||||
AdvancedChatMessageEntity, AdvancedCompletionPromptTemplateEntity
|
||||
from core.helper import encrypter
|
||||
from core.model_runtime.entities.llm_entities import LLMMode
|
||||
from core.model_runtime.entities.message_entities import PromptMessageRole
|
||||
from models.api_based_extension import APIBasedExtension, APIBasedExtensionPoint
|
||||
from models.model import AppMode
|
||||
from services.workflow.workflow_converter import WorkflowConverter
|
||||
|
||||
|
||||
@pytest.fixture
def default_variables():
    """Three app input variables covering the text-input, paragraph and select types."""
    specs = [
        ("text_input", "text-input", VariableEntity.Type.TEXT_INPUT),
        ("paragraph", "paragraph", VariableEntity.Type.PARAGRAPH),
        ("select", "select", VariableEntity.Type.SELECT),
    ]
    return [
        VariableEntity(variable=variable, label=label, type=var_type)
        for variable, label, var_type in specs
    ]
|
||||
|
||||
|
||||
def test__convert_to_start_node(default_variables):
    """Start-node conversion keeps variable order and serializes the type enum as a string."""
    # act
    result = WorkflowConverter()._convert_to_start_node(default_variables)

    # assert
    converted = result["data"]["variables"]
    assert isinstance(converted[0]["type"], str)
    assert converted[0]["type"] == "text-input"
    assert converted[0]["variable"] == "text_input"
    assert converted[1]["variable"] == "paragraph"
    assert converted[2]["variable"] == "select"
|
||||
|
||||
|
||||
def test__convert_to_http_request_node_for_chatbot(default_variables):
    """
    Test convert to http request nodes for chatbot
    """
    app_model = MagicMock()
    app_model.id = "app_id"
    app_model.tenant_id = "tenant_id"
    app_model.mode = AppMode.CHAT.value

    api_based_extension_id = "api_based_extension_id"
    mock_api_based_extension = APIBasedExtension(
        id=api_based_extension_id,
        name="api-1",
        api_key="encrypted_api_key",
        api_endpoint="https://dify.ai",
    )

    converter = WorkflowConverter()
    converter._get_api_based_extension = MagicMock(return_value=mock_api_based_extension)
    encrypter.decrypt_token = MagicMock(return_value="api_key")

    external_data_variables = [
        ExternalDataVariableEntity(
            variable="external_variable",
            type="api",
            config={
                "api_based_extension_id": api_based_extension_id
            }
        )
    ]

    nodes = converter._convert_to_http_request_node(
        app_model=app_model,
        variables=default_variables,
        external_data_variables=external_data_variables
    )

    # One HTTP-request node plus one trailing code node.
    assert len(nodes) == 2
    http_request_node, code_node = nodes

    assert http_request_node["data"]["type"] == "http-request"
    assert http_request_node["data"]["method"] == "post"
    assert http_request_node["data"]["url"] == mock_api_based_extension.api_endpoint
    assert http_request_node["data"]["authorization"]["type"] == "api-key"
    assert http_request_node["data"]["authorization"]["config"] == {
        "type": "bearer",
        "api_key": "api_key"
    }
    assert http_request_node["data"]["body"]["type"] == "json"

    body_data = http_request_node["data"]["body"]["data"]
    assert body_data

    body_data_json = json.loads(body_data)
    assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY.value

    body_params = body_data_json["params"]
    assert body_params["app_id"] == app_model.id
    assert body_params["tool_variable"] == external_data_variables[0].variable
    assert len(body_params["inputs"]) == 3
    assert body_params["query"] == "{{#sys.query#}}"  # for chatbot

    assert code_node["data"]["type"] == "code"
|
||||
|
||||
|
||||
def test__convert_to_http_request_node_for_workflow_app(default_variables):
    """
    Test convert to http request nodes for workflow app

    Verifies that an API-based external data tool is converted into an
    http-request node (POST, bearer auth, JSON body) followed by a code node,
    and that a workflow app produces an empty ``query`` param (no sys.query).
    :return:
    """
    # Minimal app stand-in; only id/tenant_id/mode are read by the converter.
    app_model = MagicMock()
    app_model.id = "app_id"
    app_model.tenant_id = "tenant_id"
    app_model.mode = AppMode.WORKFLOW.value

    api_based_extension_id = "api_based_extension_id"
    mock_api_based_extension = APIBasedExtension(
        id=api_based_extension_id,
        name="api-1",
        api_key="encrypted_api_key",
        api_endpoint="https://dify.ai",
    )

    workflow_converter = WorkflowConverter()
    # Stub out DB lookup of the extension record.
    workflow_converter._get_api_based_extension = MagicMock(return_value=mock_api_based_extension)

    # Stub token decryption so the node gets the plain "api_key" value.
    encrypter.decrypt_token = MagicMock(return_value="api_key")

    external_data_variables = [
        ExternalDataVariableEntity(
            variable="external_variable",
            type="api",
            config={
                "api_based_extension_id": api_based_extension_id
            }
        )
    ]

    nodes = workflow_converter._convert_to_http_request_node(
        app_model=app_model,
        variables=default_variables,
        external_data_variables=external_data_variables
    )

    # Expect exactly: [http-request node, code node].
    assert len(nodes) == 2
    assert nodes[0]["data"]["type"] == "http-request"

    http_request_node = nodes[0]

    assert http_request_node["data"]["method"] == "post"
    assert http_request_node["data"]["url"] == mock_api_based_extension.api_endpoint
    assert http_request_node["data"]["authorization"]["type"] == "api-key"
    assert http_request_node["data"]["authorization"]["config"] == {
        "type": "bearer",
        "api_key": "api_key"
    }
    assert http_request_node["data"]["body"]["type"] == "json"

    body_data = http_request_node["data"]["body"]["data"]

    assert body_data

    body_data_json = json.loads(body_data)
    assert body_data_json["point"] == APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY.value

    body_params = body_data_json["params"]
    assert body_params["app_id"] == app_model.id
    assert body_params["tool_variable"] == external_data_variables[0].variable
    # One input per default variable from the fixture — TODO confirm fixture size.
    assert len(body_params["inputs"]) == 3
    # Workflow apps have no chat query, unlike the chatbot variant above.
    assert body_params["query"] == ""

    code_node = nodes[1]
    assert code_node["data"]["type"] == "code"
|
||||
|
||||
|
||||
def test__convert_to_knowledge_retrieval_node_for_chatbot():
    """A chatbot (advanced-chat) app's knowledge-retrieval node must read
    its query from the system variable ``sys.query`` and carry over the
    multiple-retrieval settings unchanged."""
    retrieve_config = DatasetRetrieveConfigEntity(
        retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE,
        top_k=5,
        score_threshold=0.8,
        reranking_model={
            'reranking_provider_name': 'cohere',
            'reranking_model_name': 'rerank-english-v2.0'
        }
    )
    dataset_config = DatasetEntity(
        dataset_ids=["dataset_id_1", "dataset_id_2"],
        retrieve_config=retrieve_config
    )

    llm_config = ModelConfigEntity(
        provider='openai',
        model='gpt-4',
        mode='chat',
        parameters={},
        stop=[]
    )

    converter = WorkflowConverter()
    node = converter._convert_to_knowledge_retrieval_node(
        new_app_mode=AppMode.ADVANCED_CHAT,
        dataset_config=dataset_config,
        model_config=llm_config
    )

    node_data = node["data"]
    assert node_data["type"] == "knowledge-retrieval"
    # Chatbot apps pull the query from the system query variable.
    assert node_data["query_variable_selector"] == ["sys", "query"]
    assert node_data["dataset_ids"] == dataset_config.dataset_ids
    assert node_data["retrieval_mode"] == retrieve_config.retrieve_strategy.value
    assert node_data["multiple_retrieval_config"] == {
        "top_k": retrieve_config.top_k,
        "score_threshold": retrieve_config.score_threshold,
        "reranking_model": retrieve_config.reranking_model
    }
|
||||
|
||||
|
||||
def test__convert_to_knowledge_retrieval_node_for_workflow_app():
    """A workflow app's knowledge-retrieval node must read its query from a
    start-node variable (there is no chat ``sys.query``) and carry over the
    multiple-retrieval settings unchanged."""
    retrieve_config = DatasetRetrieveConfigEntity(
        query_variable="query",
        retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE,
        top_k=5,
        score_threshold=0.8,
        reranking_model={
            'reranking_provider_name': 'cohere',
            'reranking_model_name': 'rerank-english-v2.0'
        }
    )
    dataset_config = DatasetEntity(
        dataset_ids=["dataset_id_1", "dataset_id_2"],
        retrieve_config=retrieve_config
    )

    llm_config = ModelConfigEntity(
        provider='openai',
        model='gpt-4',
        mode='chat',
        parameters={},
        stop=[]
    )

    converter = WorkflowConverter()
    node = converter._convert_to_knowledge_retrieval_node(
        new_app_mode=AppMode.WORKFLOW,
        dataset_config=dataset_config,
        model_config=llm_config
    )

    node_data = node["data"]
    assert node_data["type"] == "knowledge-retrieval"
    # Workflow apps read the query from the configured start-node variable.
    assert node_data["query_variable_selector"] == ["start", retrieve_config.query_variable]
    assert node_data["dataset_ids"] == dataset_config.dataset_ids
    assert node_data["retrieval_mode"] == retrieve_config.retrieve_strategy.value
    assert node_data["multiple_retrieval_config"] == {
        "top_k": retrieve_config.top_k,
        "score_threshold": retrieve_config.score_threshold,
        "reranking_model": retrieve_config.reranking_model
    }
|
||||
|
||||
|
||||
def test__convert_to_llm_node_for_chatbot_simple_chat_model(default_variables):
    """Convert a chatbot app with a SIMPLE prompt and a chat-mode model.

    The `{{var}}` placeholders in the simple prompt must be rewritten to
    `{{#start.var#}}` selectors, and the resulting prompt_template must be a
    list of messages (chat mode) with context disabled.
    """
    new_app_mode = AppMode.ADVANCED_CHAT
    model = "gpt-4"
    model_mode = LLMMode.CHAT

    workflow_converter = WorkflowConverter()
    # A start node is needed so variable selectors can resolve to "start".
    start_node = workflow_converter._convert_to_start_node(default_variables)
    graph = {
        "nodes": [
            start_node
        ],
        "edges": []  # no need
    }

    # spec=ModelConfigEntity keeps attribute access strict on the mock.
    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = model
    model_config_mock.mode = model_mode.value
    model_config_mock.parameters = {}
    model_config_mock.stop = []

    prompt_template = PromptTemplateEntity(
        prompt_type=PromptTemplateEntity.PromptType.SIMPLE,
        simple_prompt_template="You are a helpful assistant {{text_input}}, {{paragraph}}, {{select}}."
    )

    llm_node = workflow_converter._convert_to_llm_node(
        original_app_mode=AppMode.CHAT,
        new_app_mode=new_app_mode,
        model_config=model_config_mock,
        graph=graph,
        prompt_template=prompt_template
    )

    assert llm_node["data"]["type"] == "llm"
    assert llm_node["data"]["model"]['name'] == model
    assert llm_node["data"]['model']["mode"] == model_mode.value
    # Build the expected template: each {{var}} becomes a {{#start.var#}} selector.
    template = prompt_template.simple_prompt_template
    for v in default_variables:
        template = template.replace('{{' + v.variable + '}}', '{{#start.' + v.variable + '#}}')
    # Chat mode → prompt_template is a message list; converter appends '\n'.
    assert llm_node["data"]["prompt_template"][0]['text'] == template + '\n'
    assert llm_node["data"]['context']['enabled'] is False
|
||||
|
||||
|
||||
def test__convert_to_llm_node_for_chatbot_simple_completion_model(default_variables):
    """Convert a chatbot app with a SIMPLE prompt and a completion-mode model.

    Same variable rewriting as the chat-model case, but for completion mode the
    prompt_template is a single dict (indexed by 'text') rather than a message
    list.
    """
    new_app_mode = AppMode.ADVANCED_CHAT
    model = "gpt-3.5-turbo-instruct"
    model_mode = LLMMode.COMPLETION

    workflow_converter = WorkflowConverter()
    # A start node is needed so variable selectors can resolve to "start".
    start_node = workflow_converter._convert_to_start_node(default_variables)
    graph = {
        "nodes": [
            start_node
        ],
        "edges": []  # no need
    }

    # spec=ModelConfigEntity keeps attribute access strict on the mock.
    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = model
    model_config_mock.mode = model_mode.value
    model_config_mock.parameters = {}
    model_config_mock.stop = []

    prompt_template = PromptTemplateEntity(
        prompt_type=PromptTemplateEntity.PromptType.SIMPLE,
        simple_prompt_template="You are a helpful assistant {{text_input}}, {{paragraph}}, {{select}}."
    )

    llm_node = workflow_converter._convert_to_llm_node(
        original_app_mode=AppMode.CHAT,
        new_app_mode=new_app_mode,
        model_config=model_config_mock,
        graph=graph,
        prompt_template=prompt_template
    )

    assert llm_node["data"]["type"] == "llm"
    assert llm_node["data"]["model"]['name'] == model
    assert llm_node["data"]['model']["mode"] == model_mode.value
    # Build the expected template: each {{var}} becomes a {{#start.var#}} selector.
    template = prompt_template.simple_prompt_template
    for v in default_variables:
        template = template.replace('{{' + v.variable + '}}', '{{#start.' + v.variable + '#}}')
    # Completion mode → prompt_template is a single dict; converter appends '\n'.
    assert llm_node["data"]["prompt_template"]['text'] == template + '\n'
    assert llm_node["data"]['context']['enabled'] is False
|
||||
|
||||
|
||||
def test__convert_to_llm_node_for_chatbot_advanced_chat_model(default_variables):
    """Convert a chatbot app with an ADVANCED chat prompt (explicit messages).

    Each configured message must survive as one prompt_template entry, with
    `{{var}}` placeholders rewritten to `{{#start.var#}}` selectors; no
    trailing newline is appended in advanced mode.
    """
    new_app_mode = AppMode.ADVANCED_CHAT
    model = "gpt-4"
    model_mode = LLMMode.CHAT

    workflow_converter = WorkflowConverter()
    # A start node is needed so variable selectors can resolve to "start".
    start_node = workflow_converter._convert_to_start_node(default_variables)
    graph = {
        "nodes": [
            start_node
        ],
        "edges": []  # no need
    }

    # spec=ModelConfigEntity keeps attribute access strict on the mock.
    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = model
    model_config_mock.mode = model_mode.value
    model_config_mock.parameters = {}
    model_config_mock.stop = []

    prompt_template = PromptTemplateEntity(
        prompt_type=PromptTemplateEntity.PromptType.ADVANCED,
        advanced_chat_prompt_template=AdvancedChatPromptTemplateEntity(messages=[
            AdvancedChatMessageEntity(text="You are a helpful assistant named {{name}}.\n\nContext:\n{{#context#}}",
                                      role=PromptMessageRole.SYSTEM),
            AdvancedChatMessageEntity(text="Hi.", role=PromptMessageRole.USER),
            AdvancedChatMessageEntity(text="Hello!", role=PromptMessageRole.ASSISTANT),
        ])
    )

    llm_node = workflow_converter._convert_to_llm_node(
        original_app_mode=AppMode.CHAT,
        new_app_mode=new_app_mode,
        model_config=model_config_mock,
        graph=graph,
        prompt_template=prompt_template
    )

    assert llm_node["data"]["type"] == "llm"
    assert llm_node["data"]["model"]['name'] == model
    assert llm_node["data"]['model']["mode"] == model_mode.value
    # Advanced chat mode keeps a one-to-one mapping of configured messages.
    assert isinstance(llm_node["data"]["prompt_template"], list)
    assert len(llm_node["data"]["prompt_template"]) == len(prompt_template.advanced_chat_prompt_template.messages)
    # Build the expected system message: each {{var}} becomes a {{#start.var#}} selector.
    template = prompt_template.advanced_chat_prompt_template.messages[0].text
    for v in default_variables:
        template = template.replace('{{' + v.variable + '}}', '{{#start.' + v.variable + '#}}')
    assert llm_node["data"]["prompt_template"][0]['text'] == template
|
||||
|
||||
|
||||
def test__convert_to_llm_node_for_workflow_advanced_completion_model(default_variables):
    """Convert an app with an ADVANCED completion prompt (single prompt text
    plus role prefixes).

    The prompt_template must come back as a single dict whose 'text' has
    `{{var}}` placeholders rewritten to `{{#start.var#}}` selectors.

    NOTE(review): the function name says "workflow" but new_app_mode below is
    AppMode.ADVANCED_CHAT — confirm whether the name or the mode is wrong.
    """
    new_app_mode = AppMode.ADVANCED_CHAT
    model = "gpt-3.5-turbo-instruct"
    model_mode = LLMMode.COMPLETION

    workflow_converter = WorkflowConverter()
    # A start node is needed so variable selectors can resolve to "start".
    start_node = workflow_converter._convert_to_start_node(default_variables)
    graph = {
        "nodes": [
            start_node
        ],
        "edges": []  # no need
    }

    # spec=ModelConfigEntity keeps attribute access strict on the mock.
    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.provider = 'openai'
    model_config_mock.model = model
    model_config_mock.mode = model_mode.value
    model_config_mock.parameters = {}
    model_config_mock.stop = []

    prompt_template = PromptTemplateEntity(
        prompt_type=PromptTemplateEntity.PromptType.ADVANCED,
        advanced_completion_prompt_template=AdvancedCompletionPromptTemplateEntity(
            prompt="You are a helpful assistant named {{name}}.\n\nContext:\n{{#context#}}\n\n"
                   "Human: hi\nAssistant: ",
            role_prefix=AdvancedCompletionPromptTemplateEntity.RolePrefixEntity(
                user="Human",
                assistant="Assistant"
            )
        )
    )

    llm_node = workflow_converter._convert_to_llm_node(
        original_app_mode=AppMode.CHAT,
        new_app_mode=new_app_mode,
        model_config=model_config_mock,
        graph=graph,
        prompt_template=prompt_template
    )

    assert llm_node["data"]["type"] == "llm"
    assert llm_node["data"]["model"]['name'] == model
    assert llm_node["data"]['model']["mode"] == model_mode.value
    # Completion mode → a single prompt dict, not a message list.
    assert isinstance(llm_node["data"]["prompt_template"], dict)
    # Build the expected prompt: each {{var}} becomes a {{#start.var#}} selector.
    template = prompt_template.advanced_completion_prompt_template.prompt
    for v in default_variables:
        template = template.replace('{{' + v.variable + '}}', '{{#start.' + v.variable + '#}}')
    assert llm_node["data"]["prompt_template"]['text'] == template
|
||||
Reference in New Issue
Block a user