backend/app/core/redis_client.py (new file, 69 lines)
@@ -0,0 +1,69 @@
"""
Redis client
"""
import redis
from app.core.config import settings
import logging

logger = logging.getLogger(__name__)

_redis_client = None


def get_redis_client():
    """
    Get the Redis client (singleton).

    Returns:
        redis.Redis: the Redis client instance, or None if the connection fails
    """
    global _redis_client

    if _redis_client is not None:
        try:
            # Probe the existing connection
            _redis_client.ping()
            return _redis_client
        except redis.RedisError:
            # Connection dropped; recreate the client
            _redis_client = None

    try:
        redis_url = getattr(settings, 'REDIS_URL', None)
        if not redis_url:
            logger.warning("REDIS_URL is not configured; Redis caching is unavailable")
            return None

        # Parse the Redis URL: redis://host:port/db
        if redis_url.startswith('redis://'):
            redis_url = redis_url.replace('redis://', '')

        # Split host:port from db
        parts = redis_url.split('/')
        host_port = parts[0]
        db = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else 0

        # Split host from port
        if ':' in host_port:
            host, port = host_port.split(':')
            port = int(port)
        else:
            host = host_port
            port = 6379

        _redis_client = redis.Redis(
            host=host,
            port=port,
            db=db,
            decode_responses=True,  # decode responses to str automatically
            socket_connect_timeout=2,
            socket_timeout=2
        )

        # Verify the new connection
        _redis_client.ping()
        logger.info(f"Redis connected: {host}:{port}/{db}")
        return _redis_client
    except Exception as e:
        logger.warning(f"Redis connection failed: {str(e)}; falling back to in-memory cache")
        _redis_client = None
        return None
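Callers are expected to degrade gracefully when this returns None. A minimal caller-side sketch, assuming the in-memory fallback mentioned in the log message is a plain process-local dict (the cache_get/cache_set helpers and _memory_cache are illustrative, not part of the commit):

# Illustrative usage sketch, not part of the committed module.
from app.core.redis_client import get_redis_client

_memory_cache = {}

def cache_get(key: str):
    client = get_redis_client()
    if client is not None:
        return client.get(key)      # str, thanks to decode_responses=True
    return _memory_cache.get(key)   # degraded mode: in-memory only

def cache_set(key: str, value: str, ttl: int = 300):
    client = get_redis_client()
    if client is not None:
        client.set(key, value, ex=ttl)  # redis-py: ex = TTL in seconds
    else:
        _memory_cache[key] = value      # note: no expiry in the fallback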
File diff suppressed because it is too large
@@ -71,7 +71,7 @@ class WorkflowValidator:
             node_type = node.get('type')
             if not node_type:
                 self.errors.append(f"Node {node_id} is missing a type")
-            elif node_type not in ['start', 'input', 'llm', 'condition', 'transform', 'output', 'end', 'default', 'loop', 'foreach', 'loop_end', 'agent', 'http', 'request', 'database', 'db', 'file', 'file_operation', 'schedule', 'delay', 'timer', 'webhook', 'email', 'mail', 'message_queue', 'mq', 'rabbitmq', 'kafka']:
+            elif node_type not in ['start', 'input', 'llm', 'condition', 'transform', 'output', 'end', 'default', 'loop', 'foreach', 'loop_end', 'agent', 'http', 'request', 'database', 'db', 'file', 'file_operation', 'schedule', 'delay', 'timer', 'webhook', 'email', 'mail', 'message_queue', 'mq', 'rabbitmq', 'kafka', 'switch', 'merge', 'wait', 'json', 'text', 'cache', 'vector_db', 'log', 'error_handler', 'csv', 'object_storage', 'slack', 'dingtalk', 'dingding', 'wechat_work', 'wecom', 'sms', 'pdf', 'image', 'excel', 'subworkflow', 'code', 'oauth', 'validator', 'batch']:
                 self.warnings.append(f"Node {node_id} uses an unknown type: {node_type}")

     def _validate_edges(self):
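The whitelist has grown long enough that the inline list literal is hard to review. A hedged refactor sketch (the constant name is illustrative, not in the commit):

# Illustrative refactor: hoist the known node types into a module-level
# frozenset so membership checks are O(1) and additions show up as one-line diffs.
KNOWN_NODE_TYPES = frozenset({
    'start', 'input', 'llm', 'condition', 'transform', 'output', 'end', 'default',
    'loop', 'foreach', 'loop_end', 'agent', 'http', 'request', 'database', 'db',
    'file', 'file_operation', 'schedule', 'delay', 'timer', 'webhook', 'email',
    'mail', 'message_queue', 'mq', 'rabbitmq', 'kafka', 'switch', 'merge', 'wait',
    'json', 'text', 'cache', 'vector_db', 'log', 'error_handler', 'csv',
    'object_storage', 'slack', 'dingtalk', 'dingding', 'wechat_work', 'wecom',
    'sms', 'pdf', 'image', 'excel', 'subworkflow', 'code', 'oauth', 'validator', 'batch',
})

# ...and in the node loop shown in the hunk above:
#     elif node_type not in KNOWN_NODE_TYPES:
#         self.warnings.append(f"Node {node_id} uses an unknown type: {node_type}")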
backend/scripts/generate_chat_agent.py (new file, 549 lines)
@@ -0,0 +1,549 @@
#!/usr/bin/env python3
"""
Generate a sample smart chat Agent.

Shows how to build a complete chat agent from the platform's building blocks,
including:
- Memory management (cache nodes)
- Intent recognition (LLM node)
- Multi-branch routing (switch node)
- Context passing (transform node)
- Multi-turn conversation support
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from sqlalchemy.orm import Session
from app.core.database import SessionLocal
from app.models.agent import Agent
from app.models.user import User
from datetime import datetime
import uuid


def generate_chat_agent(db: Session, user: User):
    """Generate the smart chat Agent - complete example"""
    nodes = []
    edges = []

    # ========== 1. Start node ==========
    start_node = {
        "id": "start-1",
        "type": "start",
        "position": {"x": 50, "y": 400},
        "data": {
            "label": "Start",
            "output_format": "json"
        }
    }
    nodes.append(start_node)

    # ========== 2. Query-memory node ==========
    query_memory_node = {
        "id": "cache-query",
        "type": "cache",
        "position": {"x": 250, "y": 400},
        "data": {
            "label": "Query memory",
            "operation": "get",
            "key": "user_memory_{user_id}",
            "default_value": '{"conversation_history": [], "user_profile": {}, "context": {}}'
        }
    }
    nodes.append(query_memory_node)

    # ========== 3. Merge user input with memory ==========
    merge_context_node = {
        "id": "transform-merge",
        "type": "transform",
        "position": {"x": 450, "y": 400},
        "data": {
            "label": "Merge context",
            "mode": "merge",
            "mapping": {
                "user_input": "{{query}}",
                "memory": "{{output}}",
                "timestamp": "{{timestamp}}"
            }
        }
    }
    nodes.append(merge_context_node)

    # ========== 4. Intent-analysis node ==========
    intent_node = {
        "id": "llm-intent",
        "type": "llm",
        "position": {"x": 650, "y": 400},
        "data": {
            "label": "Analyze intent",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.3",
            "max_tokens": "1000",
            "prompt": """You are a professional conversation-intent analyst. Analyze the user's input and identify the user's intent and emotion.

User input: {{user_input}}
Conversation history: {{memory.conversation_history}}
User profile: {{memory.user_profile}}

Output the analysis as JSON:
{
    "intent": "intent type (greeting/question/emotion/request/goodbye/other)",
    "emotion": "emotional state (positive/neutral/negative)",
    "keywords": ["keyword1", "keyword2"],
    "topic": "conversation topic",
    "needs_response": true
}

Make sure the output is valid JSON and contains no other text."""
        }
    }
    nodes.append(intent_node)

    # ========== 5. Switch node - branch on intent ==========
    switch_node = {
        "id": "switch-intent",
        "type": "switch",
        "position": {"x": 850, "y": 400},
        "data": {
            "label": "Route by intent",
            "field": "intent",
            "cases": {
                "greeting": "greeting-handle",
                "question": "question-handle",
                "emotion": "emotion-handle",
                "request": "request-handle",
                "goodbye": "goodbye-handle"
            },
            "default": "general-handle"
        }
    }
    nodes.append(switch_node)
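    # The case values above ("greeting-handle", "question-handle", ...) double as
    # edge sourceHandles: each edge out of switch-intent (the e5-* edges below)
    # sets its sourceHandle to the matching case value, and unmatched intents
    # fall through to the "default" handle. test_switch_branch in
    # backend/tests/test_nodes_all.py asserts exactly this branch lookup.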
    # ========== 6. Greeting branch ==========
    greeting_node = {
        "id": "llm-greeting",
        "type": "llm",
        "position": {"x": 1050, "y": 200},
        "data": {
            "label": "Greeting reply",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.7",
            "max_tokens": "500",
            "prompt": """You are a warm, friendly AI assistant. The user is greeting you; respond naturally and warmly.

User input: {{user_input}}
Conversation history: {{memory.conversation_history}}

Generate a friendly, natural greeting reply within 50 characters. Output the reply text only, with no extra explanation."""
        }
    }
    nodes.append(greeting_node)

    # ========== 7. Question branch ==========
    question_node = {
        "id": "llm-question",
        "type": "llm",
        "position": {"x": 1050, "y": 300},
        "data": {
            "label": "Answer question",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.5",
            "max_tokens": "2000",
            "prompt": """You are a knowledgeable, helpful AI assistant. Answer the user's question.

User question: {{user_input}}
Conversation history: {{memory.conversation_history}}
Intent analysis: {{output}}

Provide:
1. A direct, accurate answer
2. Any necessary explanation
3. If the question is unclear, a friendly request for more information

Answer naturally and clearly within 200 characters. Output the answer text only."""
        }
    }
    nodes.append(question_node)

    # ========== 8. Emotion branch ==========
    emotion_node = {
        "id": "llm-emotion",
        "type": "llm",
        "position": {"x": 1050, "y": 400},
        "data": {
            "label": "Emotional response",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.8",
            "max_tokens": "1000",
            "prompt": """You are an empathetic AI assistant. Respond appropriately to the user's emotional state.

User input: {{user_input}}
Emotional state: {{output.emotion}}
Conversation history: {{memory.conversation_history}}

Depending on the user's emotion:
- Positive: offer encouragement and resonance
- Negative: offer understanding, comfort, and support
- Neutral: offer attention and companionship

Generate a warm, empathetic reply within 150 characters. Output the reply text only."""
        }
    }
    nodes.append(emotion_node)

    # ========== 9. Request branch ==========
    request_node = {
        "id": "llm-request",
        "type": "llm",
        "position": {"x": 1050, "y": 500},
        "data": {
            "label": "Handle request",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.4",
            "max_tokens": "1500",
            "prompt": """You are a professional AI assistant. The user has made a request; analyze it and respond.

User request: {{user_input}}
Intent analysis: {{output}}
Conversation history: {{memory.conversation_history}}

Please:
1. Understand what the user is asking for
2. If it can be fulfilled, explain how
3. If it cannot, explain why in a friendly way and offer an alternative

Respond clearly and kindly within 200 characters. Output the reply text only."""
        }
    }
    nodes.append(request_node)

    # ========== 10. Goodbye branch ==========
    goodbye_node = {
        "id": "llm-goodbye",
        "type": "llm",
        "position": {"x": 1050, "y": 600},
        "data": {
            "label": "Goodbye reply",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.6",
            "max_tokens": "300",
            "prompt": """You are a friendly AI assistant. The user is ending the conversation; say a warm goodbye.

User input: {{user_input}}
Conversation history: {{memory.conversation_history}}

Generate a warm, friendly farewell that looks forward to the next chat, within 50 characters. Output the reply text only."""
        }
    }
    nodes.append(goodbye_node)

    # ========== 11. General branch ==========
    general_node = {
        "id": "llm-general",
        "type": "llm",
        "position": {"x": 1050, "y": 700},
        "data": {
            "label": "General reply",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.6",
            "max_tokens": "1000",
            "prompt": """You are a friendly, professional AI assistant. Respond to the user's input.

User input: {{user_input}}
Conversation history: {{memory.conversation_history}}
Intent analysis: {{output}}

Generate a natural, meaningful reply that keeps the conversation coherent, within 150 characters. Output the reply text only."""
        }
    }
    nodes.append(general_node)

    # ========== 12. Merge node - join the branch results ==========
    merge_response_node = {
        "id": "merge-response",
        "type": "merge",
        "position": {"x": 1250, "y": 400},
        "data": {
            "label": "Merge replies",
            "mode": "merge_first",
            "strategy": "object"
        }
    }
    nodes.append(merge_response_node)

    # ========== 13. Update-memory node ==========
    update_memory_node = {
        "id": "cache-update",
        "type": "cache",
        "position": {"x": 1450, "y": 400},
        "data": {
            "label": "Update memory",
            "operation": "set",
            "key": "user_memory_{user_id}",
            "value": '{"conversation_history": {{memory.conversation_history}} + [{"role": "user", "content": "{{user_input}}", "timestamp": "{{timestamp}}"}, {"role": "assistant", "content": "{{output}}", "timestamp": "{{timestamp}}"}], "user_profile": {{memory.user_profile}}, "context": {{memory.context}}}',
            "ttl": 86400
        }
    }
    nodes.append(update_memory_node)
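    # Note: the templated "value" above is not literal JSON - the "+" between
    # the history placeholder and the two new turn objects is left for the
    # engine's template expansion to evaluate. The intent is: append the user
    # turn and the assistant turn to memory["conversation_history"], keep
    # user_profile and context unchanged, and re-store the whole object for
    # 24 hours (ttl=86400).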
    # ========== 14. Format the final reply ==========
    format_response_node = {
        "id": "llm-format",
        "type": "llm",
        "position": {"x": 1650, "y": 400},
        "data": {
            "label": "Format reply",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.3",
            "max_tokens": "500",
            "prompt": """Format the reply below as the final output. Make sure it reads naturally and fluently.

Raw reply: {{output}}

Output the formatted reply text only, with no extra notes or markup. If the raw reply is already well formed, output it unchanged."""
        }
    }
    nodes.append(format_response_node)

    # ========== 15. End node ==========
    end_node = {
        "id": "end-1",
        "type": "end",
        "position": {"x": 1850, "y": 400},
        "data": {
            "label": "End",
            "output_format": "text"
        }
    }
    nodes.append(end_node)

    # ========== Edges ==========
    # start -> query memory
    edges.append({
        "id": "e1",
        "source": "start-1",
        "target": "cache-query",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # query memory -> merge context
    edges.append({
        "id": "e2",
        "source": "cache-query",
        "target": "transform-merge",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # merge context -> analyze intent
    edges.append({
        "id": "e3",
        "source": "transform-merge",
        "target": "llm-intent",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # analyze intent -> switch router
    edges.append({
        "id": "e4",
        "source": "llm-intent",
        "target": "switch-intent",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # switch -> each branch handler
    edges.append({
        "id": "e5-greeting",
        "source": "switch-intent",
        "target": "llm-greeting",
        "sourceHandle": "greeting-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-question",
        "source": "switch-intent",
        "target": "llm-question",
        "sourceHandle": "question-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-emotion",
        "source": "switch-intent",
        "target": "llm-emotion",
        "sourceHandle": "emotion-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-request",
        "source": "switch-intent",
        "target": "llm-request",
        "sourceHandle": "request-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-goodbye",
        "source": "switch-intent",
        "target": "llm-goodbye",
        "sourceHandle": "goodbye-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-general",
        "source": "switch-intent",
        "target": "llm-general",
        "sourceHandle": "default",
        "targetHandle": "left"
    })

    # each branch -> merge node
    edges.append({
        "id": "e6-greeting",
        "source": "llm-greeting",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-question",
        "source": "llm-question",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-emotion",
        "source": "llm-emotion",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-request",
        "source": "llm-request",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-goodbye",
        "source": "llm-goodbye",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-general",
        "source": "llm-general",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # merge -> update memory
    edges.append({
        "id": "e7",
        "source": "merge-response",
        "target": "cache-update",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # update memory -> format reply
    edges.append({
        "id": "e8",
        "source": "cache-update",
        "target": "llm-format",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    # format reply -> end
    edges.append({
        "id": "e9",
        "source": "llm-format",
        "target": "end-1",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    return {
        "name": "Smart Chat Assistant (complete example)",
        "description": """A complete chat-agent example demonstrating the platform's core capabilities:
- ✅ Memory management: cache nodes store and query conversation history
- ✅ Intent recognition: an LLM node analyzes user intent
- ✅ Multi-branch routing: a switch node dispatches each intent to its own branch
- ✅ Context passing: a transform node merges data
- ✅ Multi-turn conversation: context memory keeps the dialogue coherent
- ✅ Personalized replies: each intent gets a targeted response

Typical scenarios: companionship chat, customer-service assistants, smart Q&A.""",
        "workflow_config": {"nodes": nodes, "edges": edges}
    }


def main():
    """Entry point: generate and persist the Agent"""
    db = SessionLocal()
    try:
        # Fetch the test user
        user = db.query(User).filter(User.username == "admin").first()
        if not user:
            print("Please create the admin user first")
            return

        # Build the agent definition
        agent_data = generate_chat_agent(db, user)

        # Skip creation if it already exists
        existing = db.query(Agent).filter(
            Agent.name == agent_data["name"],
            Agent.user_id == user.id
        ).first()

        if existing:
            print(f"Agent '{agent_data['name']}' already exists; skipping creation")
            return

        # Create the Agent
        agent = Agent(
            name=agent_data["name"],
            description=agent_data["description"],
            workflow_config=agent_data["workflow_config"],
            user_id=user.id,
            status="draft"
        )
        db.add(agent)
        db.commit()
        db.refresh(agent)

        print(f"✅ Agent created: {agent.name} (ID: {agent.id})")
        print(f"   Nodes: {len(agent_data['workflow_config']['nodes'])}")
        print(f"   Edges: {len(agent_data['workflow_config']['edges'])}")
        print("\n📝 How to use:")
        print(f"   1. Find '{agent.name}' on the Agent management page")
        print("   2. Click 'Design' to open the workflow editor")
        print("   3. Configure the LLM nodes' API keys (if needed)")
        print("   4. Click 'Publish' to publish the Agent")
        print("   5. Click 'Use' to test the chat")

    except Exception as e:
        print(f"❌ Failed to create Agent: {str(e)}")
        import traceback
        traceback.print_exc()
        db.rollback()
    finally:
        db.close()


if __name__ == "__main__":
    main()
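The edge section above builds eighteen near-identical dicts by hand; a small factory would shrink it considerably. A minimal sketch (the make_edge helper is illustrative, not part of the commit):

def make_edge(edge_id: str, source: str, target: str,
              source_handle: str = "right", target_handle: str = "left") -> dict:
    """Build one edge dict in the shape generate_chat_agent uses."""
    return {
        "id": edge_id,
        "source": source,
        "target": target,
        "sourceHandle": source_handle,
        "targetHandle": target_handle,
    }

# Usage: the switch fan-out then collapses to a loop.
# for intent in ("greeting", "question", "emotion", "request", "goodbye"):
#     edges.append(make_edge(f"e5-{intent}", "switch-intent", f"llm-{intent}",
#                            source_handle=f"{intent}-handle"))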
backend/tests/test_nodes_all.py (new file, 232 lines)
@@ -0,0 +1,232 @@
import asyncio
import pytest

from app.services.workflow_engine import WorkflowEngine


def _engine_with(nodes, edges=None):
    """Build an engine whose workflow contains just the given nodes/edges."""
    wf_data = {"nodes": nodes, "edges": edges or []}
    return WorkflowEngine(workflow_id="wf_all", workflow_data=wf_data)


@pytest.mark.asyncio
async def test_switch_branch():
    node = {
        "id": "sw1",
        "type": "switch",
        "data": {"field": "status", "cases": {"ok": "ok_handle"}, "default": "def"},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"status": "ok"})
    assert res["status"] == "success"
    assert res["branch"] == "ok_handle"


@pytest.mark.asyncio
async def test_merge_array_strategy():
    node = {"id": "m1", "type": "merge", "data": {"strategy": "array"}}
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"a": 1, "b": 2})
    assert res["status"] == "success"
    assert isinstance(res["output"], list)
    assert len(res["output"]) == 2


@pytest.mark.asyncio
async def test_wait_time_mode():
    node = {
        "id": "w1",
        "type": "wait",
        "data": {"wait_type": "time", "wait_seconds": 0.01},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"ping": True})
    assert res["status"] == "success"
    assert res["output"]["ping"] is True


@pytest.mark.asyncio
async def test_json_parse_and_extract():
    node = {
        "id": "j1",
        "type": "json",
        "data": {"operation": "extract", "path": "$.data.value"},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"data": {"value": 42}})
    assert res["status"] == "success"
    assert res["output"] == 42


@pytest.mark.asyncio
async def test_text_split():
    node = {
        "id": "t1",
        "type": "text",
        "data": {"operation": "split", "delimiter": ","},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, "a,b,c")
    assert res["status"] == "success"
    assert res["output"] == ["a", "b", "c"]


@pytest.mark.asyncio
async def test_cache_set_then_get():
    node_set = {
        "id": "cset",
        "type": "cache",
        "data": {"operation": "set", "key": "k1", "ttl": 1},
    }
    node_get = {
        "id": "cget",
        "type": "cache",
        "data": {"operation": "get", "key": "k1", "ttl": 1},
    }
    engine = _engine_with([node_set, node_get])
    await engine.execute_node(node_set, {"value": "v"})
    res_get = await engine.execute_node(node_get, {})
    assert res_get["status"] == "success"
    assert res_get["output"] == "v"
    assert res_get["cache_hit"] is True


@pytest.mark.asyncio
async def test_vector_db_upsert_search_delete():
    node = {
        "id": "vec",
        "type": "vector_db",
        "data": {"operation": "upsert", "collection": "col"},
    }
    engine = _engine_with([node])
    up = await engine.execute_node(node, {"embedding": [1.0, 0.0], "text": "hi"})
    assert up["status"] == "success"

    node_search = {
        "id": "vecs",
        "type": "vector_db",
        "data": {
            "operation": "search",
            "collection": "col",
            "query_vector": [1.0, 0.0],
            "top_k": 1,
        },
    }
    res = await engine.execute_node(node_search, {})
    assert res["status"] == "success"
    assert len(res["output"]) == 1

    node_del = {
        "id": "vecd",
        "type": "vector_db",
        "data": {"operation": "delete", "collection": "col"},
    }
    del_res = await engine.execute_node(node_del, {})
    assert del_res["status"] == "success"


@pytest.mark.asyncio
async def test_log_basic():
    node = {
        "id": "log1",
        "type": "log",
        "data": {"level": "info", "message": "hello {x}", "include_data": False},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"x": 1})
    assert res["status"] == "success"
    # The engine's default log text is Chinese ("节点执行" = "node executed"),
    # so that literal is kept as-is here.
    assert res["log"]["message"].startswith("节点执行") or res["log"]["message"].startswith("hello")


@pytest.mark.asyncio
async def test_error_handler_notify():
    node = {
        "id": "err1",
        "type": "error_handler",
        "data": {"on_error": "notify"},
    }
    engine = _engine_with([node])
    res = await engine.execute_node(node, {"status": "failed", "error": "boom"})
    assert res["status"] == "error_handled"
    assert res["error"] == "boom"


@pytest.mark.asyncio
async def test_csv_parse_and_generate():
    node_parse = {
        "id": "csvp",
        "type": "csv",
        "data": {"operation": "parse", "delimiter": ",", "headers": True},
    }
    engine = _engine_with([node_parse])
    csv_text = "a,b\n1,2\n"
    res = await engine.execute_node(node_parse, csv_text)
    assert res["status"] == "success"
    assert res["output"][0]["a"] == "1"

    node_gen = {
        "id": "csvg",
        "type": "csv",
        "data": {"operation": "generate", "delimiter": ",", "headers": True},
    }
    res_gen = await engine.execute_node(node_gen, [{"a": 1, "b": 2}])
    assert res_gen["status"] == "success"
    assert "a,b" in res_gen["output"]


@pytest.mark.asyncio
async def test_object_storage_upload_download():
    node_up = {
        "id": "osup",
        "type": "object_storage",
        "data": {
            "operation": "upload",
            "provider": "s3",
            "bucket": "bk",
            "key": "file.txt",
        },
    }
    engine = _engine_with([node_up])
    res_up = await engine.execute_node(node_up, {"file": "data"})
    assert res_up["status"] == "success"
    assert res_up["output"]["status"] == "uploaded"

    node_down = {
        "id": "osdown",
        "type": "object_storage",
        "data": {
            "operation": "download",
            "provider": "s3",
            "bucket": "bk",
            "key": "file.txt",
        },
    }
    res_down = await engine.execute_node(node_down, {})
    assert res_down["status"] == "success"
    assert res_down["output"]["status"] == "downloaded"


# Nodes with heavy integration/external dependencies are marked as skipped
# to avoid network, build, and binary dependencies.
heavy_nodes = [
    "llm",
    "agent",
    "http",
    "webhook",
    "email",
    "message_queue",
    "database",
    "file",
    "pdf",
    "image",
    "excel",
    "slack",
    "dingtalk",
    "wechat_work",
    "sms",
]


@pytest.mark.skip(reason="heavy dependencies / external IO; kept for integration tests")
@pytest.mark.asyncio
async def test_heavy_nodes_placeholder():
    assert True
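The heavy_nodes list currently feeds a single placeholder. A hedged sketch of per-node markers instead, so each skipped node shows up individually in the test report (assumes the same pytest setup; test_heavy_node_integration is illustrative, not part of the commit):

@pytest.mark.skip(reason="heavy dependencies / external IO; integration-only")
@pytest.mark.parametrize("node_type", heavy_nodes)
@pytest.mark.asyncio
async def test_heavy_node_integration(node_type):
    # Placeholder body: a real integration test would build a one-node workflow
    # of `node_type` with _engine_with and execute it against live services.
    assert True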
backend/tests/test_nodes_phase4.py (new file, 136 lines)
@@ -0,0 +1,136 @@
import pytest

from app.services.workflow_engine import WorkflowEngine


def _make_engine_with_node(node):
    """Build a workflow engine containing just the one node."""
    wf_data = {"nodes": [node], "edges": []}
    return WorkflowEngine(workflow_id="wf_test", workflow_data=wf_data)


@pytest.mark.asyncio
async def test_subworkflow_mapping():
    node = {
        "id": "sub-1",
        "type": "subworkflow",
        "data": {
            "workflow_id": "child_wf",
            "input_mapping": {"mapped": "source"},
        },
    }
    engine = _make_engine_with_node(node)
    result = await engine.execute_node(node, {"source": 123, "other": 1})
    assert result["status"] == "success"
    assert result["output"]["workflow_id"] == "child_wf"
    assert result["output"]["input"]["mapped"] == 123


@pytest.mark.asyncio
async def test_code_python_success():
    node = {
        "id": "code-1",
        "type": "code",
        "data": {
            "language": "python",
            "code": "result = input_data['x'] * 2",
        },
    }
    engine = _make_engine_with_node(node)
    result = await engine.execute_node(node, {"x": 3})
    assert result["status"] == "success"
    assert result["output"] == 6


@pytest.mark.asyncio
async def test_code_unsupported_language():
    node = {
        "id": "code-2",
        "type": "code",
        "data": {"language": "go", "code": "result = 1"},
    }
    engine = _make_engine_with_node(node)
    result = await engine.execute_node(node, {})
    assert result["status"] == "success"
    # The engine reports this error in Chinese ("不支持的语言" = "unsupported
    # language"), so that literal is kept as-is.
    assert "不支持的语言" in result["output"]["error"]


@pytest.mark.asyncio
async def test_oauth_mock_token():
    node = {
        "id": "oauth-1",
        "type": "oauth",
        "data": {"provider": "google", "client_id": "id", "client_secret": "sec"},
    }
    engine = _make_engine_with_node(node)
    result = await engine.execute_node(node, {})
    assert result["status"] == "success"
    token = result["output"]
    assert token["access_token"].startswith("mock_access_token_google")
    assert token["token_type"] == "Bearer"


@pytest.mark.asyncio
async def test_validator_reject_and_continue():
    # reject branch -> failed
    node_reject = {
        "id": "val-1",
        "type": "validator",
        "data": {"schema": {"type": "object"}, "on_error": "reject"},
    }
    engine = _make_engine_with_node(node_reject)
    res_reject = await engine.execute_node(node_reject, "bad_type")
    assert res_reject["status"] == "failed"

    # continue branch -> success with a warning
    node_continue = {
        "id": "val-2",
        "type": "validator",
        "data": {"schema": {"type": "object"}, "on_error": "continue"},
    }
    engine = _make_engine_with_node(node_continue)
    res_continue = await engine.execute_node(node_continue, "bad_type")
    assert res_continue["status"] == "success"
    assert "warning" in res_continue


@pytest.mark.asyncio
async def test_batch_split_group_aggregate():
    data = list(range(5))

    # split
    node_split = {
        "id": "batch-1",
        "type": "batch",
        "data": {"batch_size": 2, "mode": "split"},
    }
    engine = _make_engine_with_node(node_split)
    res_split = await engine.execute_node(node_split, data)
    assert res_split["status"] == "success"
    assert res_split["output"][0] == [0, 1]
    assert res_split["output"][1] == [2, 3]
    assert res_split["output"][2] == [4]

    # group (same chunking logic as split)
    node_group = {
        "id": "batch-2",
        "type": "batch",
        "data": {"batch_size": 3, "mode": "group"},
    }
    engine = _make_engine_with_node(node_group)
    res_group = await engine.execute_node(node_group, data)
    assert res_group["status"] == "success"
    assert res_group["output"][0] == [0, 1, 2]
    assert res_group["output"][1] == [3, 4]

    # aggregate
    node_agg = {
        "id": "batch-3",
        "type": "batch",
        "data": {"mode": "aggregate"},
    }
    engine = _make_engine_with_node(node_agg)
    res_agg = await engine.execute_node(node_agg, data)
    assert res_agg["status"] == "success"
    assert res_agg["output"]["count"] == 5
    assert res_agg["output"]["samples"][:2] == [0, 1]
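Both suites can be run together with a tiny driver script (a minimal sketch; assumes pytest and pytest-asyncio are installed, since the tests use @pytest.mark.asyncio):

import sys
import pytest

# Run the two node-test modules added in this commit and exit with pytest's code.
sys.exit(pytest.main([
    "-q",
    "backend/tests/test_nodes_all.py",
    "backend/tests/test_nodes_phase4.py",
]))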