This commit is contained in:
rjb
2026-01-22 09:59:02 +08:00
parent 47dac9f33b
commit f7702f4e72
18 changed files with 8012 additions and 104 deletions

View File

@@ -0,0 +1,69 @@
"""
Redis客户端
"""
import redis
from app.core.config import settings
import logging
logger = logging.getLogger(__name__)
_redis_client = None
def get_redis_client():
    """Return a shared Redis client (lazy singleton), or ``None`` if unavailable.

    The client is created from ``settings.REDIS_URL`` on first use and verified
    with ``PING`` on every call; a stale connection is discarded and rebuilt.
    All failures degrade gracefully to ``None`` so callers can fall back to an
    in-memory cache.

    Returns:
        redis.Redis | None: live client with ``decode_responses=True``, or
        ``None`` when REDIS_URL is unset or the server is unreachable.
    """
    global _redis_client

    # Reuse the cached client only if it still answers PING.
    if _redis_client is not None:
        try:
            _redis_client.ping()
            return _redis_client
        except Exception:
            # Was a bare `except:` — that also swallowed KeyboardInterrupt /
            # SystemExit.  Any redis/socket error simply means "stale": rebuild.
            _redis_client = None

    try:
        redis_url = getattr(settings, 'REDIS_URL', None)
        if not redis_url:
            logger.warning("REDIS_URL未配置无法使用Redis缓存")
            return None

        # Let redis-py parse the URL.  Unlike the previous manual
        # host:port/db splitting this also handles passwords, IPv6 hosts,
        # rediss:// (TLS) and unix:// schemes.
        _redis_client = redis.Redis.from_url(
            redis_url,
            decode_responses=True,   # return str instead of bytes
            socket_connect_timeout=2,
            socket_timeout=2,
        )
        # Verify the connection before handing the client out.
        _redis_client.ping()
        # Deliberately not logging the URL itself: it may embed credentials.
        logger.info("Redis连接成功")
        return _redis_client
    except Exception as e:
        logger.warning(f"Redis连接失败: {str(e)},将使用内存缓存")
        _redis_client = None
        return None

File diff suppressed because it is too large Load Diff

View File

@@ -71,7 +71,7 @@ class WorkflowValidator:
node_type = node.get('type') node_type = node.get('type')
if not node_type: if not node_type:
self.errors.append(f"节点 {node_id} 缺少类型") self.errors.append(f"节点 {node_id} 缺少类型")
elif node_type not in ['start', 'input', 'llm', 'condition', 'transform', 'output', 'end', 'default', 'loop', 'foreach', 'loop_end', 'agent', 'http', 'request', 'database', 'db', 'file', 'file_operation', 'schedule', 'delay', 'timer', 'webhook', 'email', 'mail', 'message_queue', 'mq', 'rabbitmq', 'kafka']: elif node_type not in ['start', 'input', 'llm', 'condition', 'transform', 'output', 'end', 'default', 'loop', 'foreach', 'loop_end', 'agent', 'http', 'request', 'database', 'db', 'file', 'file_operation', 'schedule', 'delay', 'timer', 'webhook', 'email', 'mail', 'message_queue', 'mq', 'rabbitmq', 'kafka', 'switch', 'merge', 'wait', 'json', 'text', 'cache', 'vector_db', 'log', 'error_handler', 'csv', 'object_storage', 'slack', 'dingtalk', 'dingding', 'wechat_work', 'wecom', 'sms', 'pdf', 'image', 'excel', 'subworkflow', 'code', 'oauth', 'validator', 'batch']:
self.warnings.append(f"节点 {node_id} 使用了未知类型: {node_type}") self.warnings.append(f"节点 {node_id} 使用了未知类型: {node_type}")
def _validate_edges(self): def _validate_edges(self):

View File

@@ -0,0 +1,549 @@
#!/usr/bin/env python3
"""
生成智能聊天Agent示例
展示如何使用平台能力构建一个完整的聊天智能体,包含:
- 记忆管理(缓存节点)
- 意图识别LLM节点
- 多分支路由Switch节点
- 上下文传递Transform节点
- 多轮对话支持
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from sqlalchemy.orm import Session
from app.core.database import SessionLocal
from app.models.agent import Agent
from app.models.user import User
from datetime import datetime
import uuid
def generate_chat_agent(db: Session, user: User) -> dict:
    """Build the workflow definition for the demo "smart chat assistant".

    Assembles a 15-node graph: start -> query memory (cache) -> merge context
    (transform) -> intent analysis (llm) -> intent routing (switch) -> six
    per-intent reply branches (llm) -> merge replies (merge) -> update memory
    (cache) -> format reply (llm) -> end.

    Args:
        db: database session.  NOTE(review): currently unused here —
            persistence happens in the caller.
        user: owner-to-be of the agent.  NOTE(review): also unused here.

    Returns:
        dict: ``{"name": ..., "description": ..., "workflow_config":
        {"nodes": [...], "edges": [...]}}`` ready to be stored on an Agent.
    """
    nodes = []
    edges = []

    # ========== 1. Start node ==========
    start_node = {
        "id": "start-1",
        "type": "start",
        "position": {"x": 50, "y": 400},
        "data": {
            "label": "开始",
            "output_format": "json"
        }
    }
    nodes.append(start_node)

    # ========== 2. Query-memory node (cache get, keyed per user) ==========
    query_memory_node = {
        "id": "cache-query",
        "type": "cache",
        "position": {"x": 250, "y": 400},
        "data": {
            "label": "查询记忆",
            "operation": "get",
            "key": "user_memory_{user_id}",
            # Fresh users start with an empty history/profile/context.
            "default_value": '{"conversation_history": [], "user_profile": {}, "context": {}}'
        }
    }
    nodes.append(query_memory_node)

    # ========== 3. Merge user input with the recalled memory ==========
    merge_context_node = {
        "id": "transform-merge",
        "type": "transform",
        "position": {"x": 450, "y": 400},
        "data": {
            "label": "合并上下文",
            "mode": "merge",
            "mapping": {
                "user_input": "{{query}}",
                "memory": "{{output}}",
                "timestamp": "{{timestamp}}"
            }
        }
    }
    nodes.append(merge_context_node)

    # ========== 4. Intent-analysis node (low temperature for stable JSON) ==========
    intent_node = {
        "id": "llm-intent",
        "type": "llm",
        "position": {"x": 650, "y": 400},
        "data": {
            "label": "意图理解",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.3",
            "max_tokens": "1000",
            "prompt": """你是一个专业的对话意图分析助手。请分析用户的输入,识别用户的意图和情感。
用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
用户画像:{{memory.user_profile}}
请以JSON格式输出分析结果
{
"intent": "意图类型greeting/question/emotion/request/goodbye/other",
"emotion": "情感状态positive/neutral/negative",
"keywords": ["关键词1", "关键词2"],
"topic": "话题主题",
"needs_response": true
}
请确保输出是有效的JSON格式不要包含其他文字。"""
        }
    }
    nodes.append(intent_node)

    # ========== 5. Switch node — route on the detected intent ==========
    # Case values map to sourceHandle names used by the e5-* edges below.
    switch_node = {
        "id": "switch-intent",
        "type": "switch",
        "position": {"x": 850, "y": 400},
        "data": {
            "label": "意图路由",
            "field": "intent",
            "cases": {
                "greeting": "greeting-handle",
                "question": "question-handle",
                "emotion": "emotion-handle",
                "request": "request-handle",
                "goodbye": "goodbye-handle"
            },
            "default": "general-handle"
        }
    }
    nodes.append(switch_node)

    # ========== 6. Greeting branch ==========
    greeting_node = {
        "id": "llm-greeting",
        "type": "llm",
        "position": {"x": 1050, "y": 200},
        "data": {
            "label": "问候回复",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.7",
            "max_tokens": "500",
            "prompt": """你是一个温暖、友好的AI助手。用户向你打招呼请用自然、亲切的方式回应。
用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
请生成一个友好、自然的问候回复长度控制在50字以内。直接输出回复内容不要包含其他说明。"""
        }
    }
    nodes.append(greeting_node)

    # ========== 7. Question branch ==========
    question_node = {
        "id": "llm-question",
        "type": "llm",
        "position": {"x": 1050, "y": 300},
        "data": {
            "label": "问题回答",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.5",
            "max_tokens": "2000",
            "prompt": """你是一个知识渊博、乐于助人的AI助手。请回答用户的问题。
用户问题:{{user_input}}
对话历史:{{memory.conversation_history}}
意图分析:{{output}}
请提供:
1. 直接、准确的答案
2. 必要的解释和说明
3. 如果问题不明确,友好地询问更多信息
请以自然、易懂的方式回答长度控制在200字以内。直接输出回答内容。"""
        }
    }
    nodes.append(question_node)

    # ========== 8. Emotion branch ==========
    emotion_node = {
        "id": "llm-emotion",
        "type": "llm",
        "position": {"x": 1050, "y": 400},
        "data": {
            "label": "情感回应",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.8",
            "max_tokens": "1000",
            "prompt": """你是一个善解人意的AI助手。请根据用户的情感状态给予适当的回应。
用户输入:{{user_input}}
情感状态:{{output.emotion}}
对话历史:{{memory.conversation_history}}
请根据用户的情感:
- 如果是积极情感:给予鼓励和共鸣
- 如果是消极情感:给予理解、安慰和支持
- 如果是中性情感:给予关注和陪伴
请生成一个温暖、共情的回复长度控制在150字以内。直接输出回复内容。"""
        }
    }
    nodes.append(emotion_node)

    # ========== 9. Request branch ==========
    request_node = {
        "id": "llm-request",
        "type": "llm",
        "position": {"x": 1050, "y": 500},
        "data": {
            "label": "请求处理",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.4",
            "max_tokens": "1500",
            "prompt": """你是一个专业的AI助手。用户提出了一个请求请分析并回应。
用户请求:{{user_input}}
意图分析:{{output}}
对话历史:{{memory.conversation_history}}
请:
1. 理解用户的请求内容
2. 如果可以满足,说明如何满足
3. 如果无法满足,友好地说明原因并提供替代方案
请以清晰、友好的方式回应长度控制在200字以内。直接输出回复内容。"""
        }
    }
    nodes.append(request_node)

    # ========== 10. Goodbye branch ==========
    goodbye_node = {
        "id": "llm-goodbye",
        "type": "llm",
        "position": {"x": 1050, "y": 600},
        "data": {
            "label": "告别回复",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.6",
            "max_tokens": "300",
            "prompt": """你是一个友好的AI助手。用户要结束对话请给予温暖的告别。
用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
请生成一个温暖、友好的告别回复表达期待下次交流。长度控制在50字以内。直接输出回复内容。"""
        }
    }
    nodes.append(goodbye_node)

    # ========== 11. Fallback (default) branch ==========
    general_node = {
        "id": "llm-general",
        "type": "llm",
        "position": {"x": 1050, "y": 700},
        "data": {
            "label": "通用回复",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.6",
            "max_tokens": "1000",
            "prompt": """你是一个友好、专业的AI助手。请回应用户的输入。
用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
意图分析:{{output}}
请生成一个自然、有意义的回复保持对话的连贯性。长度控制在150字以内。直接输出回复内容。"""
        }
    }
    nodes.append(general_node)

    # ========== 12. Merge node — collect whichever branch fired ==========
    merge_response_node = {
        "id": "merge-response",
        "type": "merge",
        "position": {"x": 1250, "y": 400},
        "data": {
            "label": "合并回复",
            "mode": "merge_first",
            "strategy": "object"
        }
    }
    nodes.append(merge_response_node)

    # ========== 13. Update-memory node (cache set, 24h TTL) ==========
    # NOTE(review): the "value" template concatenates the history list with a
    # literal "+" inside JSON-ish text — confirm the cache node's templating
    # actually evaluates this rather than storing it verbatim.
    update_memory_node = {
        "id": "cache-update",
        "type": "cache",
        "position": {"x": 1450, "y": 400},
        "data": {
            "label": "更新记忆",
            "operation": "set",
            "key": "user_memory_{user_id}",
            "value": '{"conversation_history": {{memory.conversation_history}} + [{"role": "user", "content": "{{user_input}}", "timestamp": "{{timestamp}}"}, {"role": "assistant", "content": "{{output}}", "timestamp": "{{timestamp}}"}], "user_profile": {{memory.user_profile}}, "context": {{memory.context}}}',
            "ttl": 86400
        }
    }
    nodes.append(update_memory_node)

    # ========== 14. Final-formatting node ==========
    format_response_node = {
        "id": "llm-format",
        "type": "llm",
        "position": {"x": 1650, "y": 400},
        "data": {
            "label": "格式化回复",
            "provider": "deepseek",
            "model": "deepseek-chat",
            "temperature": "0.3",
            "max_tokens": "500",
            "prompt": """请将以下回复内容格式化为最终输出。确保回复自然、流畅。
原始回复:{{output}}
请直接输出格式化后的回复内容,不要包含其他说明或标记。如果原始回复已经是合适的格式,直接输出即可。"""
        }
    }
    nodes.append(format_response_node)

    # ========== 15. End node ==========
    end_node = {
        "id": "end-1",
        "type": "end",
        "position": {"x": 1850, "y": 400},
        "data": {
            "label": "结束",
            "output_format": "text"
        }
    }
    nodes.append(end_node)

    # ========== Edges ==========
    # start -> query memory
    edges.append({
        "id": "e1",
        "source": "start-1",
        "target": "cache-query",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # query memory -> merge context
    edges.append({
        "id": "e2",
        "source": "cache-query",
        "target": "transform-merge",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # merge context -> intent analysis
    edges.append({
        "id": "e3",
        "source": "transform-merge",
        "target": "llm-intent",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # intent analysis -> switch routing
    edges.append({
        "id": "e4",
        "source": "llm-intent",
        "target": "switch-intent",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # switch -> branch handlers (sourceHandle must equal the case handle above)
    edges.append({
        "id": "e5-greeting",
        "source": "switch-intent",
        "target": "llm-greeting",
        "sourceHandle": "greeting-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-question",
        "source": "switch-intent",
        "target": "llm-question",
        "sourceHandle": "question-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-emotion",
        "source": "switch-intent",
        "target": "llm-emotion",
        "sourceHandle": "emotion-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-request",
        "source": "switch-intent",
        "target": "llm-request",
        "sourceHandle": "request-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-goodbye",
        "source": "switch-intent",
        "target": "llm-goodbye",
        "sourceHandle": "goodbye-handle",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e5-general",
        "source": "switch-intent",
        "target": "llm-general",
        "sourceHandle": "default",
        "targetHandle": "left"
    })
    # every branch -> merge node
    edges.append({
        "id": "e6-greeting",
        "source": "llm-greeting",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-question",
        "source": "llm-question",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-emotion",
        "source": "llm-emotion",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-request",
        "source": "llm-request",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-goodbye",
        "source": "llm-goodbye",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    edges.append({
        "id": "e6-general",
        "source": "llm-general",
        "target": "merge-response",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # merge -> update memory
    edges.append({
        "id": "e7",
        "source": "merge-response",
        "target": "cache-update",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # update memory -> format reply
    edges.append({
        "id": "e8",
        "source": "cache-update",
        "target": "llm-format",
        "sourceHandle": "right",
        "targetHandle": "left"
    })
    # format reply -> end
    edges.append({
        "id": "e9",
        "source": "llm-format",
        "target": "end-1",
        "sourceHandle": "right",
        "targetHandle": "left"
    })

    return {
        "name": "智能聊天助手(完整示例)",
        "description": """一个完整的聊天智能体示例,展示平台的核心能力:
- ✅ 记忆管理:使用缓存节点存储和查询对话历史
- ✅ 意图识别使用LLM节点分析用户意图
- ✅ 多分支路由使用Switch节点根据意图分发到不同处理分支
- ✅ 上下文传递使用Transform节点合并数据
- ✅ 多轮对话:支持上下文记忆和连贯对话
- ✅ 个性化回复:根据不同意图生成针对性回复
适用场景:情感陪聊、客服助手、智能问答等聊天场景。""",
        "workflow_config": {"nodes": nodes, "edges": edges}
    }
def main():
    """Create the demo chat agent for the admin user (idempotent)."""
    session = SessionLocal()
    try:
        # The demo agent is attached to the existing "admin" account.
        admin = session.query(User).filter(User.username == "admin").first()
        if admin is None:
            print("请先创建admin用户")
            return

        spec = generate_chat_agent(session, admin)

        # Idempotency guard: same name + same owner means "already created".
        duplicate = session.query(Agent).filter(
            Agent.name == spec["name"],
            Agent.user_id == admin.id
        ).first()
        if duplicate is not None:
            print(f"Agent '{spec['name']}' 已存在,跳过创建")
            return

        agent = Agent(
            name=spec["name"],
            description=spec["description"],
            workflow_config=spec["workflow_config"],
            user_id=admin.id,
            status="draft"
        )
        session.add(agent)
        session.commit()
        session.refresh(agent)

        print(f"✅ 成功创建Agent: {agent.name} (ID: {agent.id})")
        print(f" 节点数量: {len(spec['workflow_config']['nodes'])}")
        print(f" 连接数量: {len(spec['workflow_config']['edges'])}")
        print(f"\n📝 使用说明:")
        print(f" 1. 在Agent管理页面找到 '{agent.name}'")
        print(f" 2. 点击'设计'按钮进入工作流编辑器")
        print(f" 3. 配置LLM节点的API密钥如需要")
        print(f" 4. 点击'发布'按钮发布Agent")
        print(f" 5. 点击'使用'按钮测试对话功能")
    except Exception as e:
        print(f"❌ 创建Agent失败: {str(e)}")
        import traceback
        traceback.print_exc()
        session.rollback()
    finally:
        session.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,232 @@
import asyncio
import pytest
from app.services.workflow_engine import WorkflowEngine
def _engine_with(nodes, edges=None):
    """Build a WorkflowEngine around the given nodes (edges optional)."""
    return WorkflowEngine(
        workflow_id="wf_all",
        workflow_data={"nodes": nodes, "edges": edges or []},
    )
@pytest.mark.asyncio
async def test_switch_branch():
    """A switch node routes to the handle mapped to the matching case."""
    switch_def = {
        "id": "sw1",
        "type": "switch",
        "data": {
            "field": "status",
            "cases": {"ok": "ok_handle"},
            "default": "def",
        },
    }
    outcome = await _engine_with([switch_def]).execute_node(switch_def, {"status": "ok"})
    assert outcome["status"] == "success"
    assert outcome["branch"] == "ok_handle"
@pytest.mark.asyncio
async def test_merge_array_strategy():
    """Merge with strategy=array collects both inputs into a 2-element list."""
    merge_def = {"id": "m1", "type": "merge", "data": {"strategy": "array"}}
    outcome = await _engine_with([merge_def]).execute_node(merge_def, {"a": 1, "b": 2})
    assert outcome["status"] == "success"
    assert isinstance(outcome["output"], list)
    assert len(outcome["output"]) == 2
@pytest.mark.asyncio
async def test_wait_time_mode():
    """A time-based wait node passes its input through unchanged."""
    wait_def = {
        "id": "w1",
        "type": "wait",
        "data": {"wait_type": "time", "wait_seconds": 0.01},
    }
    outcome = await _engine_with([wait_def]).execute_node(wait_def, {"ping": True})
    assert outcome["status"] == "success"
    assert outcome["output"]["ping"] is True
@pytest.mark.asyncio
async def test_json_parse_and_extract():
    """JSON extract with a JSONPath pulls out the nested value."""
    json_def = {
        "id": "j1",
        "type": "json",
        "data": {"operation": "extract", "path": "$.data.value"},
    }
    payload = {"data": {"value": 42}}
    outcome = await _engine_with([json_def]).execute_node(json_def, payload)
    assert outcome["status"] == "success"
    assert outcome["output"] == 42
@pytest.mark.asyncio
async def test_text_split():
    """A text split node cuts the string on the configured delimiter."""
    split_def = {
        "id": "t1",
        "type": "text",
        "data": {"operation": "split", "delimiter": ","},
    }
    outcome = await _engine_with([split_def]).execute_node(split_def, "a,b,c")
    assert outcome["status"] == "success"
    assert outcome["output"] == ["a", "b", "c"]
@pytest.mark.asyncio
async def test_cache_set_then_get():
    """A value stored via cache/set is returned by cache/get as a hit."""
    setter = {
        "id": "cset",
        "type": "cache",
        "data": {"operation": "set", "key": "k1", "ttl": 1},
    }
    getter = {
        "id": "cget",
        "type": "cache",
        "data": {"operation": "get", "key": "k1", "ttl": 1},
    }
    engine = _engine_with([setter, getter])
    await engine.execute_node(setter, {"value": "v"})
    hit = await engine.execute_node(getter, {})
    assert hit["status"] == "success"
    assert hit["output"] == "v"
    assert hit["cache_hit"] is True
@pytest.mark.asyncio
async def test_vector_db_upsert_search_delete():
    """Upsert into a collection, find the entry via search, then delete it."""
    upsert_def = {
        "id": "vec",
        "type": "vector_db",
        "data": {"operation": "upsert", "collection": "col"},
    }
    # One engine instance is reused so the in-memory collection persists.
    engine = _engine_with([upsert_def])
    upserted = await engine.execute_node(upsert_def, {"embedding": [1.0, 0.0], "text": "hi"})
    assert upserted["status"] == "success"

    search_def = {
        "id": "vecs",
        "type": "vector_db",
        "data": {
            "operation": "search",
            "collection": "col",
            "query_vector": [1.0, 0.0],
            "top_k": 1,
        },
    }
    found = await engine.execute_node(search_def, {})
    assert found["status"] == "success"
    assert len(found["output"]) == 1

    delete_def = {
        "id": "vecd",
        "type": "vector_db",
        "data": {"operation": "delete", "collection": "col"},
    }
    deleted = await engine.execute_node(delete_def, {})
    assert deleted["status"] == "success"
@pytest.mark.asyncio
async def test_log_basic():
    """A log node succeeds and records either the template or a default message."""
    log_def = {
        "id": "log1",
        "type": "log",
        "data": {"level": "info", "message": "hello {x}", "include_data": False},
    }
    outcome = await _engine_with([log_def]).execute_node(log_def, {"x": 1})
    assert outcome["status"] == "success"
    recorded = outcome["log"]["message"]
    assert recorded.startswith("节点执行") or recorded.startswith("hello")
@pytest.mark.asyncio
async def test_error_handler_notify():
    """error_handler in notify mode marks the failure handled, keeping the error."""
    handler_def = {
        "id": "err1",
        "type": "error_handler",
        "data": {"on_error": "notify"},
    }
    failed_input = {"status": "failed", "error": "boom"}
    outcome = await _engine_with([handler_def]).execute_node(handler_def, failed_input)
    assert outcome["status"] == "error_handled"
    assert outcome["error"] == "boom"
@pytest.mark.asyncio
async def test_csv_parse_and_generate():
    """CSV round-trip: parse text into dict rows, then generate text from rows."""
    parse_def = {
        "id": "csvp",
        "type": "csv",
        "data": {"operation": "parse", "delimiter": ",", "headers": True},
    }
    engine = _engine_with([parse_def])
    parsed = await engine.execute_node(parse_def, "a,b\n1,2\n")
    assert parsed["status"] == "success"
    assert parsed["output"][0]["a"] == "1"

    generate_def = {
        "id": "csvg",
        "type": "csv",
        "data": {"operation": "generate", "delimiter": ",", "headers": True},
    }
    generated = await engine.execute_node(generate_def, [{"a": 1, "b": 2}])
    assert generated["status"] == "success"
    assert "a,b" in generated["output"]
@pytest.mark.asyncio
async def test_object_storage_upload_download():
    """Object-storage upload then download both report their terminal status."""
    location = {"provider": "s3", "bucket": "bk", "key": "file.txt"}

    upload_def = {
        "id": "osup",
        "type": "object_storage",
        "data": {"operation": "upload", **location},
    }
    engine = _engine_with([upload_def])
    uploaded = await engine.execute_node(upload_def, {"file": "data"})
    assert uploaded["status"] == "success"
    assert uploaded["output"]["status"] == "uploaded"

    download_def = {
        "id": "osdown",
        "type": "object_storage",
        "data": {"operation": "download", **location},
    }
    downloaded = await engine.execute_node(download_def, {})
    assert downloaded["status"] == "success"
    assert downloaded["output"]["status"] == "downloaded"
# Node types with heavy integration/external dependencies (network access,
# compiled extensions, binary formats).  They are deliberately excluded from
# these unit tests and reserved for integration testing.
# NOTE(review): this list is currently informational only — nothing in this
# module iterates it.
heavy_nodes = [
    "llm",
    "agent",
    "http",
    "webhook",
    "email",
    "message_queue",
    "database",
    "file",
    "pdf",
    "image",
    "excel",
    "slack",
    "dingtalk",
    "wechat_work",
    "sms",
]
@pytest.mark.skip(reason="重依赖/外部IO保留集成测试")
@pytest.mark.asyncio
async def test_heavy_nodes_placeholder():
    """Always-skipped placeholder standing in for the heavy node types."""
    assert True

View File

@@ -0,0 +1,136 @@
import pytest
from app.services.workflow_engine import WorkflowEngine
def _make_engine_with_node(node):
    """Wrap a single node (no edges) in a fresh WorkflowEngine."""
    return WorkflowEngine(
        workflow_id="wf_test",
        workflow_data={"nodes": [node], "edges": []},
    )
@pytest.mark.asyncio
async def test_subworkflow_mapping():
    """input_mapping renames fields before they reach the child workflow."""
    sub_def = {
        "id": "sub-1",
        "type": "subworkflow",
        "data": {
            "workflow_id": "child_wf",
            "input_mapping": {"mapped": "source"},
        },
    }
    outcome = await _make_engine_with_node(sub_def).execute_node(
        sub_def, {"source": 123, "other": 1}
    )
    assert outcome["status"] == "success"
    assert outcome["output"]["workflow_id"] == "child_wf"
    assert outcome["output"]["input"]["mapped"] == 123
@pytest.mark.asyncio
async def test_code_python_success():
    """A python code node runs the snippet and exposes `result` as output."""
    code_def = {
        "id": "code-1",
        "type": "code",
        "data": {
            "language": "python",
            "code": "result = input_data['x'] * 2",
        },
    }
    outcome = await _make_engine_with_node(code_def).execute_node(code_def, {"x": 3})
    assert outcome["status"] == "success"
    assert outcome["output"] == 6
@pytest.mark.asyncio
async def test_code_unsupported_language():
    """An unsupported language yields success with an error payload in output."""
    code_def = {
        "id": "code-2",
        "type": "code",
        "data": {"language": "go", "code": "result = 1"},
    }
    outcome = await _make_engine_with_node(code_def).execute_node(code_def, {})
    assert outcome["status"] == "success"
    assert "不支持的语言" in outcome["output"]["error"]
@pytest.mark.asyncio
async def test_oauth_mock_token():
    """The oauth node returns a provider-tagged mock Bearer token."""
    oauth_def = {
        "id": "oauth-1",
        "type": "oauth",
        "data": {"provider": "google", "client_id": "id", "client_secret": "sec"},
    }
    outcome = await _make_engine_with_node(oauth_def).execute_node(oauth_def, {})
    assert outcome["status"] == "success"
    granted = outcome["output"]
    assert granted["access_token"].startswith("mock_access_token_google")
    assert granted["token_type"] == "Bearer"
@pytest.mark.asyncio
async def test_validator_reject_and_continue():
    """Schema violations fail in reject mode; continue mode passes with a warning."""
    def validator_def(node_id, on_error):
        return {
            "id": node_id,
            "type": "validator",
            "data": {"schema": {"type": "object"}, "on_error": on_error},
        }

    # reject mode -> failed
    rejecting = validator_def("val-1", "reject")
    rejected = await _make_engine_with_node(rejecting).execute_node(rejecting, "bad_type")
    assert rejected["status"] == "failed"

    # continue mode -> success, but flagged with a warning
    continuing = validator_def("val-2", "continue")
    continued = await _make_engine_with_node(continuing).execute_node(continuing, "bad_type")
    assert continued["status"] == "success"
    assert "warning" in continued
@pytest.mark.asyncio
async def test_batch_split_group_aggregate():
    """batch node: split/group chunk the list; aggregate summarizes it."""
    data = list(range(5))

    def batch_def(node_id, **cfg):
        return {"id": node_id, "type": "batch", "data": cfg}

    # split: chunks of batch_size, last chunk may be short
    split_def = batch_def("batch-1", batch_size=2, mode="split")
    split_res = await _make_engine_with_node(split_def).execute_node(split_def, data)
    assert split_res["status"] == "success"
    assert split_res["output"][0] == [0, 1]
    assert split_res["output"][1] == [2, 3]
    assert split_res["output"][2] == [4]

    # group: same chunking logic as split
    group_def = batch_def("batch-2", batch_size=3, mode="group")
    group_res = await _make_engine_with_node(group_def).execute_node(group_def, data)
    assert group_res["status"] == "success"
    assert group_res["output"][0] == [0, 1, 2]
    assert group_res["output"][1] == [3, 4]

    # aggregate: count plus a sample prefix
    agg_def = batch_def("batch-3", mode="aggregate")
    agg_res = await _make_engine_with_node(agg_def).execute_node(agg_def, data)
    assert agg_res["status"] == "success"
    assert agg_res["output"]["count"] == 5
    assert agg_res["output"]["samples"][:2] == [0, 1]

164
check_switch_logs.py Executable file
View File

@@ -0,0 +1,164 @@
#!/usr/bin/env python3
"""
查看Switch节点日志的专用脚本
用于诊断Switch节点的分支过滤问题
"""
import sys
import os
import json
from datetime import datetime
# 添加项目路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
from app.core.database import SessionLocal
from app.models.execution import Execution
from app.models.execution_log import ExecutionLog
def format_json(data):
    """Pretty-print JSON-like data for console output.

    Dicts AND lists are rendered as indented JSON (the original only handled
    dicts, so list payloads printed as raw ``str(list)``).  Anything else —
    or a container that is not JSON-serializable (e.g. containing datetime
    values) — falls back to ``str(data)`` so this helper never raises.

    Args:
        data: arbitrary log payload.

    Returns:
        str: formatted representation.
    """
    if isinstance(data, (dict, list)):
        try:
            return json.dumps(data, ensure_ascii=False, indent=2)
        except (TypeError, ValueError):
            # Non-serializable member — degrade gracefully for display.
            return str(data)
    return str(data)
def main():
    """Inspect the most recent agent execution and dump Switch-node logs.

    Reads straight from the database: finds the latest execution that has an
    agent_id, then prints (1) every Switch-related log entry, (2) a summary of
    the switch match/filter phases, (3) the intent node's recorded output and
    (4) a one-line output digest per executed node.
    """
    db = SessionLocal()
    try:
        # Latest agent-driven execution record.
        print("=" * 80)
        print("查找最近的Agent执行记录...")
        print("=" * 80)
        execution = db.query(Execution).filter(
            Execution.agent_id.isnot(None)
        ).order_by(Execution.created_at.desc()).first()
        if not execution:
            print("❌ 没有找到执行记录")
            return
        print(f"\n✅ 找到执行记录: {execution.id}")
        print(f" 状态: {execution.status}")
        print(f" 执行时间: {execution.execution_time}ms")
        print(f" 创建时间: {execution.created_at}")
        # Full, time-ordered log stream for this execution.
        print("\n" + "=" * 80)
        print("Switch节点相关日志:")
        print("=" * 80)
        logs = db.query(ExecutionLog).filter(
            ExecutionLog.execution_id == execution.id
        ).order_by(ExecutionLog.timestamp.asc()).all()
        if not logs:
            print("❌ 没有找到执行日志")
            return
        # Keep only entries that look switch-related (by node type or message).
        switch_logs = []
        for log in logs:
            if log.node_type == 'switch' or 'Switch' in log.message or '[rjb] Switch' in log.message:
                switch_logs.append(log)
        if not switch_logs:
            print("❌ 没有找到Switch节点相关的日志")
            print("\n所有日志节点类型:")
            node_types = set(log.node_type for log in logs if log.node_type)
            for nt in sorted(node_types):
                print(f" - {nt}")
            return
        print(f"\n找到 {len(switch_logs)} 条Switch节点相关日志:\n")
        for i, log in enumerate(switch_logs, 1):
            print(f"[{i}] {log.timestamp.strftime('%H:%M:%S.%f')[:-3]} [{log.level}]")
            print(f" 节点: {log.node_id or '(无)'} ({log.node_type or '(无)'})")
            print(f" 消息: {log.message}")
            if log.data:
                print(f" 数据:")
                data_str = format_json(log.data)
                # Print the payload in full, indented under the entry.
                for line in data_str.split('\n'):
                    print(f" {line}")
            if log.duration:
                print(f" 耗时: {log.duration}ms")
            print()
        # Break the switch's behaviour into its match and edge-filter phases.
        print("=" * 80)
        print("Switch节点执行流程分析:")
        print("=" * 80)
        match_logs = [log for log in switch_logs if '匹配' in log.message]
        filter_logs = [log for log in switch_logs if '过滤' in log.message]
        if match_logs:
            print("\n📊 匹配阶段:")
            for log in match_logs:
                if log.data:
                    data = log.data
                    print(f" 节点 {log.node_id}:")
                    print(f" 字段: {data.get('field', 'N/A')}")
                    print(f" 字段值: {data.get('field_value', 'N/A')}")
                    print(f" 匹配的分支: {data.get('matched_case', 'N/A')}")
                    print(f" 处理后的输入键: {data.get('processed_input_keys', 'N/A')}")
        if filter_logs:
            print("\n🔍 过滤阶段:")
            for log in filter_logs:
                if log.data:
                    data = log.data
                    print(f" 节点 {log.node_id}:")
                    print(f" 匹配的分支: {data.get('branch', 'N/A')}")
                    print(f" 过滤前边数: {data.get('edges_before', 'N/A')}")
                    print(f" 保留边数: {data.get('edges_kept', 'N/A')}")
                    print(f" 移除边数: {data.get('edges_removed', 'N/A')}")
        # What the intent-analysis node actually produced.
        print("\n" + "=" * 80)
        print("意图理解节点输出分析:")
        print("=" * 80)
        intent_logs = [log for log in logs if log.node_id and 'intent' in log.node_id.lower()]
        if intent_logs:
            for log in intent_logs:
                if log.message == "节点执行完成" and log.data:
                    print(f"\n节点 {log.node_id} 的输出:")
                    output = log.data.get('output', {})
                    print(format_json(output))
        else:
            print("❌ 没有找到意图理解节点的日志")
        # One-line digest of every node's output (debug aid).
        print("\n" + "=" * 80)
        print("所有节点输出摘要:")
        print("=" * 80)
        node_outputs = {}
        for log in logs:
            # NOTE(review): assumes log.data is not None for completed nodes —
            # a completion entry without data would raise AttributeError here.
            if log.message == "节点执行完成" and log.node_id:
                node_outputs[log.node_id] = log.data.get('output', {})
        for node_id, output in node_outputs.items():
            # Truncate very long string outputs to keep the summary readable.
            if isinstance(output, str) and len(output) > 100:
                print(f"{node_id}: {output[:100]}...")
            else:
                print(f"{node_id}: {output}")
    except Exception as e:
        print(f"❌ 错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        db.close()


if __name__ == "__main__":
    main()

108
debug_switch_node.py Normal file
View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python3
"""
调试Switch节点的详细脚本
"""
import sys
import os
import json
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
from app.core.database import SessionLocal
from app.models.execution import Execution
from app.models.agent import Agent
def main():
    """Dump the Switch node's configuration and observed runtime behaviour.

    Loads the latest agent execution and its agent's workflow config, then
    prints the switch node's cases/default, its outgoing edges, its recorded
    output (if node results were stored), and which branch targets actually
    executed.
    """
    db = SessionLocal()
    try:
        # Most recent agent-driven execution.
        execution = db.query(Execution).filter(
            Execution.agent_id.isnot(None)
        ).order_by(Execution.created_at.desc()).first()
        if not execution:
            print("❌ 没有找到执行记录")
            return
        print(f"执行ID: {execution.id}")
        print(f"状态: {execution.status}")
        print()
        # The workflow definition lives on the Agent row.
        agent = db.query(Agent).filter(Agent.id == execution.agent_id).first()
        if not agent:
            print("❌ 没有找到Agent")
            return
        workflow_config = agent.workflow_config
        nodes = workflow_config.get('nodes', [])
        edges = workflow_config.get('edges', [])
        # Locate the first switch node (only one is expected in this workflow).
        switch_node = None
        for node in nodes:
            if node.get('type') == 'switch':
                switch_node = node
                break
        if not switch_node:
            print("❌ 没有找到Switch节点")
            return
        print("=" * 80)
        print("Switch节点配置:")
        print("=" * 80)
        print(f"节点ID: {switch_node['id']}")
        print(f"字段: {switch_node['data'].get('field')}")
        print(f"Cases: {json.dumps(switch_node['data'].get('cases', {}), ensure_ascii=False, indent=2)}")
        print(f"Default: {switch_node['data'].get('default')}")
        print()
        # Outgoing edges from the switch (one per branch handle).
        print("=" * 80)
        print("从Switch节点出发的边:")
        print("=" * 80)
        switch_edges = [e for e in edges if e.get('source') == switch_node['id']]
        for edge in switch_edges:
            print(f"边ID: {edge.get('id')}")
            print(f" sourceHandle: {edge.get('sourceHandle')}")
            print(f" target: {edge.get('target')}")
            print()
        # The switch node's recorded output, when node results were persisted.
        print("=" * 80)
        print("执行结果中的节点输出:")
        print("=" * 80)
        if execution.output_data and 'node_results' in execution.output_data:
            node_results = execution.output_data['node_results']
            if switch_node['id'] in node_results:
                switch_result = node_results[switch_node['id']]
                print(f"Switch节点输出: {json.dumps(switch_result, ensure_ascii=False, indent=2)}")
            else:
                print("❌ Switch节点没有输出结果")
        else:
            print("❌ 没有找到节点执行结果")
        # Which branch targets actually ran (presence in node_results).
        print()
        print("=" * 80)
        print("执行了的分支节点:")
        print("=" * 80)
        if execution.output_data and 'node_results' in execution.output_data:
            node_results = execution.output_data['node_results']
            for edge in switch_edges:
                target_id = edge.get('target')
                if target_id in node_results:
                    print(f"{target_id} (sourceHandle: {edge.get('sourceHandle')})")
                else:
                    print(f"{target_id} (sourceHandle: {edge.get('sourceHandle')}) - 未执行")
    except Exception as e:
        print(f"❌ 错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        db.close()


if __name__ == "__main__":
    main()

View File

@@ -179,6 +179,7 @@ const inputMessage = ref('')
const loading = ref(false) const loading = ref(false)
const messagesContainer = ref<HTMLElement>() const messagesContainer = ref<HTMLElement>()
let pollingInterval: any = null let pollingInterval: any = null
let replyAdded = false // 标志位:防止重复添加回复
// 发送消息 // 发送消息
const handleSendMessage = async () => { const handleSendMessage = async () => {
@@ -210,9 +211,17 @@ const handleSendMessage = async () => {
const execution = response.data const execution = response.data
// 重置标志位
replyAdded = false
// 轮询执行状态 // 轮询执行状态
const checkStatus = async () => { const checkStatus = async () => {
try { try {
// 如果已经添加过回复,直接返回,避免重复添加
if (replyAdded) {
return
}
// 获取详细执行状态(包含节点执行信息) // 获取详细执行状态(包含节点执行信息)
const statusResponse = await api.get(`/api/v1/executions/${execution.id}/status`) const statusResponse = await api.get(`/api/v1/executions/${execution.id}/status`)
const status = statusResponse.data const status = statusResponse.data
@@ -225,6 +234,14 @@ const handleSendMessage = async () => {
const exec = execResponse.data const exec = execResponse.data
if (exec.status === 'completed') { if (exec.status === 'completed') {
// 防止重复添加:如果已经添加过回复,直接返回
if (replyAdded) {
return
}
// 标记已添加回复
replyAdded = true
// 提取Agent回复 // 提取Agent回复
let agentReply = '' let agentReply = ''
if (exec.output_data) { if (exec.output_data) {
@@ -268,6 +285,14 @@ const handleSendMessage = async () => {
pollingInterval = null pollingInterval = null
} }
} else if (exec.status === 'failed') { } else if (exec.status === 'failed') {
// 防止重复添加:如果已经添加过回复,直接返回
if (replyAdded) {
return
}
// 标记已添加回复
replyAdded = true
messages.value.push({ messages.value.push({
role: 'agent', role: 'agent',
content: `执行失败: ${exec.error_message || '未知错误'}`, content: `执行失败: ${exec.error_message || '未知错误'}`,
@@ -290,6 +315,14 @@ const handleSendMessage = async () => {
// 不需要做任何操作,等待下次轮询 // 不需要做任何操作,等待下次轮询
} }
} catch (error: any) { } catch (error: any) {
// 防止重复添加:如果已经添加过回复,直接返回
if (replyAdded) {
return
}
// 标记已添加回复
replyAdded = true
messages.value.push({ messages.value.push({
role: 'agent', role: 'agent',
content: `获取执行结果失败: ${error.response?.data?.detail || error.message}`, content: `获取执行结果失败: ${error.response?.data?.detail || error.message}`,

File diff suppressed because it is too large Load Diff

175
test_memory_functionality.py Executable file
View File

@@ -0,0 +1,175 @@
#!/usr/bin/env python3
"""
测试记忆功能
参考工作流调用测试总结.txt的测试方法
"""
import sys
sys.path.insert(0, 'backend')
from app.core.database import SessionLocal
from app.models.agent import Agent
from app.models.execution import Execution
from app.models.execution_log import ExecutionLog
import json
from datetime import datetime
def test_memory_functionality() -> None:
    """Inspect the last two executions of the chat agent to verify memory works.

    Looks up the agent named '智能聊天助手(完整示例)', walks the node execution
    logs of its two most recent runs (replayed in chronological order), and
    prints whether the memory payload (``memory`` / ``conversation_history``)
    was propagated through the key workflow nodes and whether the remembered
    name '老七' appears in the final output. Finally dumps the ``user_memory_*``
    keys currently stored in Redis.

    Side effects: prints a diagnostic report to stdout; read-only against the
    database and Redis. Returns nothing.
    """
    db = SessionLocal()
    try:
        # Locate the demo chat agent by its exact (Chinese) display name.
        agent = db.query(Agent).filter(
            Agent.name == '智能聊天助手(完整示例)'
        ).first()
        if not agent:
            print("❌ 未找到'智能聊天助手(完整示例)'Agent")
            return
        print(f"✅ 找到Agent: {agent.name} (ID: {agent.id})")
        print("="*80)
        # Fetch the two most recent executions — one per chat turn. Memory can
        # only be verified across at least two turns.
        executions = db.query(Execution).filter(
            Execution.agent_id == agent.id
        ).order_by(Execution.created_at.desc()).limit(2).all()
        if len(executions) < 2:
            print(f"⚠️ 只找到 {len(executions)} 次执行需要至少2次执行来测试记忆功能")
            print("请先进行两次对话测试")
            return
        print(f"\n找到 {len(executions)} 次执行记录")
        print("="*80)
        # Replay in chronological order (query returned newest-first).
        for i, exec_record in enumerate(reversed(executions), 1):
            print(f"\n{'='*80}")
            print(f"执行 {i}: {exec_record.id}")
            print(f"输入: {exec_record.input_data}")
            print(f"时间: {exec_record.created_at}")
            print(f"状态: {exec_record.status}")
            # The memory-relevant nodes of the chat workflow, in pipeline order.
            nodes_to_check = [
                ('cache-query', '查询记忆'),
                ('transform-merge', '合并上下文'),
                ('llm-question', '问题回答'),
                ('cache-update', '更新记忆'),
                ('llm-format', '格式化回复'),
                ('end-1', '最终输出')
            ]
            for node_id, label in nodes_to_check:
                # Prefer the "execution finished" log entry (carries output data)...
                log = db.query(ExecutionLog).filter(
                    ExecutionLog.execution_id == exec_record.id,
                    ExecutionLog.node_id == node_id,
                    ExecutionLog.message.like(f'节点 {node_id}%执行完成')
                ).first()
                if not log:
                    # ...fall back to the "execution started" entry (input data).
                    log = db.query(ExecutionLog).filter(
                        ExecutionLog.execution_id == exec_record.id,
                        ExecutionLog.node_id == node_id,
                        ExecutionLog.message.like(f'节点 {node_id}%开始执行')
                    ).first()
                if log and log.data:
                    # Completed logs carry the node output; started logs the input.
                    data_key = 'output' if '执行完成' in log.message else 'input'
                    data = log.data.get(data_key, {})
                    print(f"\n{label} ({node_id}):")
                    if isinstance(data, dict):
                        print(f" keys: {list(data.keys())}")
                        # Memory may live under a nested 'memory' dict...
                        if 'memory' in data:
                            memory = data['memory']
                            if isinstance(memory, dict):
                                print(f" ✅ memory存在keys: {list(memory.keys())}")
                                if 'conversation_history' in memory:
                                    history = memory['conversation_history']
                                    if isinstance(history, list):
                                        print(f" ✅ conversation_history: {len(history)}")
                                        if history:
                                            print(f" 最新一条: {history[-1].get('content', '')[:50]}")
                                    else:
                                        print(f" ❌ conversation_history不是list: {type(history)}")
                        # ...or conversation_history may have been flattened to the top level.
                        elif 'conversation_history' in data:
                            history = data['conversation_history']
                            if isinstance(history, list):
                                print(f" ✅ conversation_history在顶层: {len(history)}")
                                if history:
                                    print(f" 最新一条: {history[-1].get('content', '')[:50]}")
                        # For the end node, also check whether the final text recalls the name.
                        if node_id == 'end-1' and 'output' in data:
                            output = data['output']
                            if isinstance(output, str):
                                print(f" ✅ 最终输出: {output[:200]}")
                                if '老七' in output:
                                    print(f" ✅ 输出中包含名字'老七'")
                                else:
                                    print(f" ❌ 输出中不包含名字'老七'")
                    elif isinstance(data, str):
                        print(f" 输出类型: str, 内容: {data[:200]}")
                        if '老七' in data:
                            print(f" ✅ 输出中包含名字'老七'")
                        else:
                            print(f" ❌ 输出中不包含名字'老七'")
                else:
                    print(f"\n{label} ({node_id}): ❌ 未找到执行日志")
            # Check the execution's recorded final result as well.
            if exec_record.output_data:
                output_data = exec_record.output_data
                if isinstance(output_data, dict):
                    result = output_data.get('result', '')
                    if isinstance(result, str):
                        print(f"\n最终结果: {result[:200]}")
                        if '老七' in result:
                            print(f"✅ 最终结果中包含名字'老七'")
                        else:
                            print(f"❌ 最终结果中不包含名字'老七'")
        # NOTE(review): the Redis dump uses no per-execution state, so it runs
        # once after the loop — confirm against the original file's indentation.
        print(f"\n{'='*80}")
        print("检查Redis中的记忆数据:")
        try:
            # Imported lazily so the DB inspection above still works without Redis deps.
            from app.core.redis_client import get_redis_client
            redis_client = get_redis_client()
            if redis_client:
                keys = redis_client.keys('user_memory_*')
                if keys:
                    for key in keys:
                        value = redis_client.get(key)
                        if value:
                            try:
                                memory_data = json.loads(value)
                                if 'conversation_history' in memory_data:
                                    history = memory_data['conversation_history']
                                    print(f"{key}: {len(history)} 条对话记录")
                                    if history:
                                        print(f" 最新一条: {history[-1].get('content', '')[:50]}")
                            # Narrowed from a bare `except:` — only JSON-parse
                            # failures should be reported as unparseable values.
                            except (json.JSONDecodeError, TypeError):
                                print(f" ⚠️ {key}: 无法解析JSON")
                else:
                    print(" ❌ Redis中没有找到记忆数据")
            else:
                print(" ⚠️ Redis客户端不可用")
        except Exception as e:
            # Best-effort diagnostic: a Redis failure must not abort the report.
            print(f" ⚠️ 检查Redis失败: {str(e)}")
        print(f"\n{'='*80}")
        print("测试完成")
    finally:
        # Always release the DB session, even on unexpected errors.
        db.close()
# Script entry point: run the memory-functionality inspection against the
# live database and Redis when executed directly.
if __name__ == '__main__':
    test_memory_functionality()

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
测试output变量提取逻辑
"""
import sys
sys.path.insert(0, 'backend')
from app.services.workflow_engine import WorkflowEngine
# Simulated input payload for the llm-format node: the reply string is buried
# three levels deep under nested 'right' keys, as produced by chained
# sourceHandle='right' edges in the workflow engine.
input_data = {
    'right': {
        'right': {
            'right': '是的,我记得!根据我们之前的对话,你告诉我你的名字叫"老七"。我会在本次对话中记住这个名字,以便更好地为你提供帮助。如果你希望我用其他称呼,也可以随时告诉我。',
            'query': '你还记得我的名字吗?'
        },
        'memory': {
            'conversation_history': [],
            'user_profile': {},
            'context': {}
        },
        'query': '你还记得我的名字吗?'
    },
    'memory': {
        'conversation_history': [],
        'user_profile': {},
        'context': {}
    },
    'query': '你还记得我的名字吗?'
}
# Create a WorkflowEngine instance with an empty workflow — only its helper
# methods are exercised here, not actual execution.
engine = WorkflowEngine("test", {"nodes": [], "edges": []})
# Exercise the _get_nested_value helper with the plain 'output' path
# (expected to miss, since 'output' is not a top-level key).
print("测试_get_nested_value方法:")
value1 = engine._get_nested_value(input_data, 'output')
print(f" _get_nested_value(input_data, 'output'): {value1}")
# Replicate the engine's {{output}} extraction logic: descend through nested
# 'right' keys (depth-capped at 10) until a string value is found.
print("\n测试output变量提取逻辑:")
right_value = input_data.get('right')
print(f" right_value类型: {type(right_value)}")
print(f" right_value: {str(right_value)[:100]}")
if right_value is not None:
    if isinstance(right_value, str):
        value = right_value
        print(f" ✅ 从right字段字符串提取: {value[:100]}")
    elif isinstance(right_value, dict):
        current = right_value
        depth = 0
        # Walk down the 'right' chain; stop at the first string or a dict
        # without a 'right' key. The depth cap guards against cyclic data.
        while isinstance(current, dict) and depth < 10:
            if 'right' in current:
                current = current['right']
                depth += 1
                if isinstance(current, str):
                    value = current
                    print(f" ✅ 从right字段嵌套{depth}层)提取: {value[:100]}")
                    break
            else:
                break
    else:
        # right_value is neither a string nor a dict — nothing to extract.
        print(f" ❌ 无法提取字符串值")
print("\n测试完成")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,296 @@
# 智能体聊天助手记忆存储说明
## 一、数据存储位置
### 1. 主要存储Redis
智能体聊天助手使用 **Redis** 作为记忆数据的持久化存储后端。
- **存储键名格式**`user_memory_{user_id}`
- 例如:`user_memory_default``user_memory_12345`
- **存储位置**Redis 数据库(默认 DB 0
- **数据格式**JSON 字符串
### 2. 备用存储:内存缓存
如果 Redis 不可用,系统会回退到**内存缓存**Memory Cache
⚠️ **重要提示**:内存缓存只在**单次执行会话内有效**,执行结束后数据会丢失,无法跨会话保留。
### 3. 存储结构
每个用户的记忆数据包含以下字段:
```json
{
"conversation_history": [
{
"role": "user",
"content": "我的名字叫老七",
"timestamp": "2024-01-01T10:00:00"
},
{
"role": "assistant",
"content": "好的,我记住了你的名字是老七。",
"timestamp": "2024-01-01T10:00:01"
}
],
"user_profile": {
// 用户画像信息(可扩展)
},
"context": {
// 上下文信息(可扩展)
}
}
```
## 二、数据大小限制
### 1. Redis 存储限制
- **单条记录大小**
- Redis 理论上单个 key 的值最大可达 **512MB**(默认配置)
- 实际使用中,受 Redis 服务器配置的 `maxmemory` 限制
- 当前系统:**无硬编码限制**(取决于 Redis 服务器配置)
- **对话历史累积**
- 对话历史会**不断累积**,没有自动截断机制
- 每次对话会添加 2 条记录(用户消息 + 助手回复)
- 假设每条消息平均 200 字(约 600 字节1000 轮对话约 1.2MB
### 2. 实际使用情况
根据当前系统检查:
- 当前用户记忆 key 数量:**1 个**
- 示例 key 大小:**约 5.73 KB**20 条对话历史)
- Redis 已使用内存:**2.52 MB**
- Redis 最大内存限制:**无限制**(取决于服务器配置)
### 3. 建议的容量规划
| 对话轮数 | 预估大小 | 说明 |
|---------|---------|------|
| 100 轮 | ~120 KB | 适合短期对话 |
| 500 轮 | ~600 KB | 适合中期对话 |
| 1000 轮 | ~1.2 MB | 适合长期对话 |
| 5000 轮 | ~6 MB | 需要监控内存使用 |
| 10000 轮 | ~12 MB | 建议实施截断策略 |
## 三、数据持久化与丢失风险
### 1. 数据持久化机制
#### Redis 持久化(推荐)
- **持久化方式**:取决于 Redis 配置
- **RDB**:定期快照,默认开启
- **AOF**:追加日志,可选开启
- **Docker 卷持久化**
- 使用 `redis_data` 卷存储数据
- 即使容器重启,数据也会保留
- 数据存储在 Docker 卷中,位置:`/var/lib/docker/volumes/redis_data`
#### 内存缓存(不持久化)
- 数据仅存在于进程内存中
- 进程重启后数据丢失
- 仅用于 Redis 不可用时的临时回退
### 2. 数据丢失风险分析
| 场景 | 数据是否丢失 | 说明 |
|------|------------|------|
| Redis 正常重启 | ❌ 不丢失 | 数据已持久化到磁盘 |
| Docker 容器重启 | ❌ 不丢失 | 数据存储在 Docker 卷中 |
| Redis 数据卷被删除 | ✅ **会丢失** | 需要重新创建卷 |
| 超过 TTL 时间 | ✅ **会过期** | 默认 24 小时后过期 |
| Redis 服务器故障 | ⚠️ 取决于持久化配置 | 如果持久化配置不当可能丢失 |
| 使用内存缓存时 | ✅ **会丢失** | 每次执行后丢失 |
### 3. TTL生存时间设置
当前配置:
- **TTL****86400 秒**24 小时)
- **位置**`cache-update` 节点的 `ttl` 配置
- **默认值**:如果未配置,默认 3600 秒1 小时)
⚠️ **重要**:如果用户在 24 小时内没有新的对话,记忆数据会**自动过期删除**。
## 四、配置说明
### 1. Redis 配置
**环境变量**`docker-compose.dev.yml`
```yaml
REDIS_URL=redis://redis:6379/0
```
**配置文件**`backend/.env`
```env
REDIS_URL=redis://localhost:6379/0
```
### 2. Cache 节点配置
**查询记忆节点**`cache-query`
```python
{
"id": "cache-query",
"type": "cache",
"data": {
"operation": "get",
"key": "user_memory_{user_id}",
"default_value": '{"conversation_history": [], "user_profile": {}, "context": {}}'
}
}
```
**更新记忆节点**`cache-update`
```python
{
"id": "cache-update",
"type": "cache",
"data": {
"operation": "set",
"key": "user_memory_{user_id}",
"value": '{"conversation_history": {{memory.conversation_history}} + [...], ...}',
"ttl": 86400 # 24小时
}
}
```
## 五、优化建议
### 1. 对话历史截断策略
如果对话历史过长,建议实施截断策略:
**方案 A保留最近 N 条**
```python
# 在 cache-update 节点中,限制 conversation_history 长度
conversation_history = memory.conversation_history[-100:] # 只保留最近100条
```
**方案 B按时间截断**
```python
# 只保留最近7天的对话
from datetime import datetime, timedelta
cutoff_date = (datetime.now() - timedelta(days=7)).isoformat()
conversation_history = [
msg for msg in memory.conversation_history
if msg.get('timestamp', '') > cutoff_date
]
```
**方案 C智能摘要**
```python
# 将旧对话历史压缩为摘要
# 使用 LLM 节点生成摘要,保留关键信息
```
### 2. 增加 TTL 时间
如果需要更长的记忆保留时间,可以修改 TTL
```python
"ttl": 604800 # 7天
"ttl": 2592000 # 30天
"ttl": 0 # 永不过期(不推荐,可能导致内存溢出)
```
### 3. 监控 Redis 内存使用
定期检查 Redis 内存使用情况:
```bash
# 进入 Redis 容器
docker exec -it aiagent-redis-1 redis-cli
# 查看内存信息
INFO memory
# 查看所有用户记忆 key
KEYS user_memory_*
# 查看特定 key 的大小
MEMORY USAGE user_memory_default
```
### 4. 数据备份策略
**定期备份 Redis 数据**
```bash
# 备份 Redis 数据
docker exec aiagent-redis-1 redis-cli SAVE
docker cp aiagent-redis-1:/data/dump.rdb ./backup/dump_$(date +%Y%m%d).rdb
```
**恢复 Redis 数据**
```bash
# 恢复 Redis 数据
docker cp ./backup/dump_20240101.rdb aiagent-redis-1:/data/dump.rdb
docker restart aiagent-redis-1
```
## 六、常见问题
### Q1: 数据会丢失吗?
**A**:
- 如果使用 Redis 且配置了持久化:**不会丢失**(除非数据卷被删除或超过 TTL
- 如果使用内存缓存:**会丢失**(每次执行后丢失)
### Q2: 可以存储多少对话?
**A**:
- 理论上:取决于 Redis 服务器内存限制
- 实际建议:**1000-5000 轮对话**(约 1-6 MB
- 超过 10000 轮建议实施截断策略
### Q3: 如何延长记忆保留时间?
**A**:
- 修改 `cache-update` 节点的 `ttl` 配置
- 设置为更大的值(如 2592000 = 30 天)
- 或设置为 0永不过期需谨慎
### Q4: 如何清理特定用户的记忆?
**A**:
```bash
# 通过 Redis CLI
docker exec -it aiagent-redis-1 redis-cli DEL user_memory_{user_id}
# 或通过工作流添加 delete 操作节点
```
### Q5: 多个用户的数据会互相影响吗?
**A**:
- **不会**,每个用户使用独立的 key`user_memory_{user_id}`
- 数据完全隔离
## 七、总结
### 当前配置
-**存储位置**Redis持久化
-**TTL**24 小时
-**数据格式**JSON包含对话历史、用户画像、上下文
-**大小限制**:无硬编码限制(取决于 Redis 配置)
- ⚠️ **数据丢失风险**:低(除非数据卷被删除或超过 TTL
### 建议
1. **短期使用**< 1000 轮对话):当前配置足够
2. **长期使用**> 5000 轮对话):建议实施对话历史截断策略
3. **生产环境**:建议定期备份 Redis 数据,监控内存使用
4. **高可用场景**:考虑 Redis 主从复制或集群模式
---
**文档版本**v1.0
**最后更新**2024年
**维护人员**AI Assistant

View File

@@ -0,0 +1,344 @@
# 智能体聊天助手记忆问题修复文档
## 问题描述
智能聊天助手无法记住用户信息,具体表现为:
1. 第一次对话:用户输入 "我的名字叫老七"
2. 第二次对话:用户输入 "你还记得我的名字吗?"
3. **预期结果**:助手应该回答 "是的,我记得你叫老七"
4. **实际结果**:助手无法记住用户名字,或者回答错误
**额外问题**:助手有时会回复两次相同的消息。
## 问题分析
### 工作流结构
智能聊天助手的工作流包含以下关键节点:
1. **开始节点** (`start-1`) - 接收用户输入
2. **查询记忆节点** (`cache-query`) - 从Redis查询对话历史
3. **合并上下文节点** (`transform-merge`) - 合并用户输入和记忆
4. **意图理解节点** (`llm-intent`) - 分析用户意图
5. **意图路由节点** (`switch-intent`) - 根据意图分发到不同分支
6. **问题回答节点** (`llm-question`) - 生成回答
7. **合并回复节点** (`merge-response`) - 合并各分支结果
8. **更新记忆节点** (`cache-update`) - 更新对话历史到Redis
9. **格式化回复节点** (`llm-format`) - 格式化最终回复
10. **结束节点** (`end-1`) - 返回最终结果
### 根本原因
经过深入调试,发现以下问题:
#### 1. Cache节点数据存储问题
- **问题**`cache-update` 节点的 `value_template` 中,`{{user_input}}``{{output}}``{{timestamp}}` 等变量在Python表达式执行时被当作字符串 `"null"` 处理
- **原因**变量替换逻辑在Python表达式执行之后导致变量未正确替换
#### 2. Cache节点数据读取问题
- **问题**`cache-query` 节点的 `default_value` 中,`conversation_history` 初始化为 `null`,导致后续 `null + [...]` 操作失败
- **原因**JSON解析后空值被解析为 `None`,而不是空列表 `[]`
#### 3. Transform节点数据丢失问题
- **问题**`transform-merge` 节点的 `mapping` 中,`{{output}}` 映射到 `memory` 字段,但 `cache-query` 的输出结构不包含完整的 `memory` 对象
- **原因**`cache-query` 返回的数据结构是 `{"right": {...}}`,而 `transform-merge` 期望的是完整的 `memory` 对象
#### 4. 边连接导致的数据丢失问题
- **问题**`cache-query → transform-merge` 的边设置了 `sourceHandle='right'`,导致只有 `right` 字段被传递,其他内存相关字段丢失
- **原因**`get_node_input` 方法在处理 `sourceHandle` 时,只传递了指定字段,没有保留内存相关字段
#### 5. LLM节点变量替换问题
- **问题1**LLM节点的prompt模板中`{{user_input}}``{{memory.conversation_history}}` 无法正确替换
- **原因**:变量替换逻辑不支持嵌套路径(如 `{{memory.conversation_history}}`
- **问题2**`{{output}}` 变量无法从嵌套的 `right.right.right` 结构中提取
- **原因**:变量提取逻辑只检查顶层字段,没有递归查找
#### 6. 前端重复回复问题
- **问题**:助手有时会回复两次相同的消息
- **原因**:轮询逻辑中,`checkStatus` 函数在状态为 `completed` 时可能被多次调用,导致重复添加消息
## 修复方案
### 1. 修复Cache节点的变量替换逻辑
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`execute_node` 方法中的 `cache` 节点处理逻辑
**关键修改**
```python
# 在Python表达式执行之前先替换变量
if '{{user_input}}' in value_template:
user_input_value = input_data.get('user_input') or input_data.get('query') or input_data.get('input') or input_data.get('USER_INPUT') or ''
user_input_escaped = json_module.dumps(user_input_value, ensure_ascii=False)[1:-1] # 移除外层引号
value_template = value_template.replace('{{user_input}}', user_input_escaped)
if '{{output}}' in value_template:
output_value = self._extract_output_value(input_data)
output_escaped = json_module.dumps(output_value, ensure_ascii=False)[1:-1] # 移除外层引号
value_template = value_template.replace('{{output}}', output_escaped)
if '{{timestamp}}' in value_template:
timestamp_value = input_data.get('timestamp') or datetime.now().isoformat()
timestamp_escaped = json_module.dumps(timestamp_value, ensure_ascii=False)[1:-1] # 移除外层引号
value_template = value_template.replace('{{timestamp}}', timestamp_escaped)
```
**说明**
- 使用 `json.dumps()[1:-1]` 来正确转义字符串,同时移除外层引号
- 在Python表达式执行之前完成变量替换
### 2. 修复Cache节点的默认值初始化
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`execute_node` 方法中的 `cache-query` 处理逻辑
**关键修改**
```python
# 确保default_value中的conversation_history初始化为空列表
default_value_str = node_data.get('default_value', '{}')
try:
default_value = json_module.loads(default_value_str)
# 确保conversation_history是列表而不是null
if 'conversation_history' not in default_value or default_value.get('conversation_history') is None:
default_value['conversation_history'] = []
except:
default_value = {"conversation_history": [], "user_profile": {}, "context": {}}
```
### 3. 修复Transform节点的Memory字段构建
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`execute_node` 方法中的 `transform` 节点处理逻辑(`merge` 模式)
**关键修改**
```python
# 如果memory字段为空尝试从顶层字段构建
if key == 'memory' and (value is None or value == '' or value == '{{output}}'):
# 尝试从expanded_input中构建memory对象
memory = {}
for field in ['conversation_history', 'user_profile', 'context']:
if field in expanded_input:
memory[field] = expanded_input[field]
if memory:
result[key] = memory
else:
# 如果还是找不到保留原有的memory字段如果有
if 'memory' in expanded_input:
result[key] = expanded_input['memory']
else:
result[key] = value
```
### 4. 修复边连接导致的数据丢失
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`get_node_input` 方法
**关键修改**
```python
# 即使有sourceHandle也要保留内存相关字段
if edge.get('sourceHandle'):
input_data[edge['sourceHandle']] = source_output
# 显式保留内存相关字段
if isinstance(source_output, dict):
for field in ['conversation_history', 'user_profile', 'context', 'memory']:
if field in source_output:
input_data[field] = source_output[field]
else:
# ... 原有逻辑
```
### 5. 修复LLM节点的嵌套路径变量支持
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`execute_node` 方法中的 `llm` 节点处理逻辑(变量替换部分)
**关键修改**
```python
# 支持嵌套路径,如 {{memory.conversation_history}}
if '.' in var_name:
value = self._get_nested_value(input_data, var_name)
else:
# 原有逻辑:检查别名和直接字段
value = input_data.get(var_name)
if value is None:
# 检查别名
aliases = {
'user_input': ['query', 'input', 'USER_INPUT', 'user_input'],
'output': ['result', 'response', 'text', 'content']
}
for alias_key, alias_list in aliases.items():
if var_name == alias_key:
for alias in alias_list:
value = input_data.get(alias)
if value is not None:
break
```
**特殊处理**`{{memory.conversation_history}}` 格式化
```python
# 如果是conversation_history格式化为可读格式
if var_name == 'memory.conversation_history' and isinstance(value, list):
formatted_history = []
for msg in value:
role = msg.get('role', 'unknown')
content = msg.get('content', '')
if role == 'user':
formatted_history.append(f"用户: {content}")
elif role == 'assistant':
formatted_history.append(f"助手: {content}")
value = '\n'.join(formatted_history) if formatted_history else '无对话历史'
```
### 6. 修复{{output}}变量的递归提取
**文件**`backend/app/services/workflow_engine.py`
**修改位置**`execute_node` 方法中的 `llm` 节点处理逻辑(`{{output}}` 变量处理)
**关键修改**
```python
# 特殊处理output变量递归查找right字段
if var_path == 'output':
right_value = input_data.get('right')
if right_value:
# 递归查找字符串值
def extract_string_from_right(obj, depth=0):
if isinstance(obj, str):
return obj
elif isinstance(obj, dict):
# 优先检查常见字段
for key in ['content', 'text', 'message', 'output']:
if key in obj and isinstance(obj[key], str):
return obj[key]
# 递归查找right字段
if 'right' in obj:
return extract_string_from_right(obj['right'], depth + 1)
return None
value = extract_string_from_right(right_value)
if value:
logger.info(f"[rjb] LLM节点从right字段提取output: {value[:100]}")
```
### 7. 修复前端重复回复问题
**文件**`frontend/src/components/AgentChatPreview.vue`
**修改位置**`handleSendMessage` 方法中的 `checkStatus` 函数
**关键修改**
```typescript
// 添加标志位,防止重复添加回复
let replyAdded = false
const checkStatus = async () => {
try {
// 如果已经添加过回复,直接返回
if (replyAdded) {
return
}
// ... 获取执行状态 ...
if (exec.status === 'completed') {
// 防止重复添加
if (replyAdded) {
return
}
// 标记已添加回复
replyAdded = true
// 添加回复消息
messages.value.push({
role: 'agent',
content: agentReply || '执行完成',
timestamp: Date.now()
})
// ... 其他逻辑 ...
}
} catch (error) {
// 同样添加防重复逻辑
if (replyAdded) {
return
}
replyAdded = true
// ... 错误处理 ...
}
}
// 每次发送新消息时重置标志位
replyAdded = false
```
## 测试验证
### 测试步骤
1. **第一次对话**
- 输入:`我的名字叫老七`
- 预期:助手正常回复,并将信息存储到记忆
2. **第二次对话**
- 输入:`你还记得我的名字吗?`
- 预期:助手回答 `是的,我记得你叫老七`
3. **验证重复回复**
- 观察是否只收到一次回复
### 测试结果
**记忆功能**:正常工作,能正确记住用户名字
**重复回复**:已修复,每次对话只回复一次
## 关键代码文件
1. **后端核心逻辑**
- `backend/app/services/workflow_engine.py` - 工作流执行引擎
- `backend/scripts/generate_chat_agent.py` - 智能聊天助手工作流定义
2. **前端组件**
- `frontend/src/components/AgentChatPreview.vue` - 聊天预览组件
3. **测试脚本**
- `test_memory_functionality.py` - 记忆功能测试脚本
- `test_output_variable_extraction.py` - 输出变量提取测试脚本
## 修复时间线
1. **问题发现**:用户报告无法记住名字
2. **初步分析**:检查工作流定义和节点配置
3. **深入调试**:添加详细日志,追踪数据流
4. **修复Cache节点**:修复数据存储和读取逻辑
5. **修复Transform节点**:修复数据合并逻辑
6. **修复LLM节点**:修复变量替换和嵌套路径支持
7. **修复前端**:修复重复回复问题
8. **测试验证**:确认所有问题已解决
## 经验总结
1. **数据流追踪**:在复杂工作流中,需要仔细追踪数据在每个节点之间的传递
2. **变量替换时机**:变量替换必须在表达式执行之前完成
3. **数据结构一致性**:确保上下游节点对数据结构的期望一致
4. **边界情况处理**:注意处理 `null`、空值、嵌套结构等边界情况
5. **前端防重复**:轮询逻辑中需要添加防重复机制
## 后续优化建议
1. **统一数据结构**:定义统一的数据结构规范,避免节点间数据格式不一致
2. **增强日志**:添加更详细的调试日志,方便问题排查
3. **单元测试**:为关键节点添加单元测试,确保修复的稳定性
4. **性能优化**优化轮询频率减少不必要的API调用
5. **错误处理**:增强错误处理机制,提供更友好的错误提示
---
**修复完成时间**2024年根据实际时间填写
**修复人员**AI Assistant
**文档版本**v1.0

374
聊天智能体示例.json Normal file
View File

@@ -0,0 +1,374 @@
{
"name": "智能聊天助手(完整示例)",
"description": "一个完整的聊天智能体示例,展示平台的核心能力:\n- ✅ 记忆管理:使用缓存节点存储和查询对话历史\n- ✅ 意图识别使用LLM节点分析用户意图\n- ✅ 多分支路由使用Switch节点根据意图分发到不同处理分支\n- ✅ 上下文传递使用Transform节点合并数据\n- ✅ 多轮对话:支持上下文记忆和连贯对话\n- ✅ 个性化回复:根据不同意图生成针对性回复\n\n适用场景情感陪聊、客服助手、智能问答等聊天场景。",
"workflow_config": {
"nodes": [
{
"id": "start-1",
"type": "start",
"position": {
"x": 50,
"y": 400
},
"data": {
"label": "开始",
"output_format": "json"
}
},
{
"id": "cache-query",
"type": "cache",
"position": {
"x": 250,
"y": 400
},
"data": {
"label": "查询记忆",
"operation": "get",
"key": "user_memory_{user_id}",
"default_value": "{\"conversation_history\": [], \"user_profile\": {}, \"context\": {}}"
}
},
{
"id": "transform-merge",
"type": "transform",
"position": {
"x": 450,
"y": 400
},
"data": {
"label": "合并上下文",
"mode": "merge",
"mapping": {
"user_input": "{{query}}",
"memory": "{{output}}",
"timestamp": "{{timestamp}}"
}
}
},
{
"id": "llm-intent",
"type": "llm",
"position": {
"x": 650,
"y": 400
},
"data": {
"label": "意图理解",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.3",
"max_tokens": "1000",
"prompt": "你是一个专业的对话意图分析助手。请分析用户的输入,识别用户的意图和情感。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n用户画像{{memory.user_profile}}\n\n请以JSON格式输出分析结果\n{\n \"intent\": \"意图类型greeting/question/emotion/request/goodbye/other\",\n \"emotion\": \"情感状态positive/neutral/negative\",\n \"keywords\": [\"关键词1\", \"关键词2\"],\n \"topic\": \"话题主题\",\n \"needs_response\": true\n}\n\n请确保输出是有效的JSON格式不要包含其他文字。"
}
},
{
"id": "switch-intent",
"type": "switch",
"position": {
"x": 850,
"y": 400
},
"data": {
"label": "意图路由",
"field": "intent",
"cases": {
"greeting": "greeting-handle",
"question": "question-handle",
"emotion": "emotion-handle",
"request": "request-handle",
"goodbye": "goodbye-handle"
},
"default": "general-handle"
}
},
{
"id": "llm-greeting",
"type": "llm",
"position": {
"x": 1050,
"y": 200
},
"data": {
"label": "问候回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.7",
"max_tokens": "500",
"prompt": "你是一个温暖、友好的AI助手。用户向你打招呼请用自然、亲切的方式回应。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n\n请生成一个友好、自然的问候回复长度控制在50字以内。直接输出回复内容不要包含其他说明。"
}
},
{
"id": "llm-question",
"type": "llm",
"position": {
"x": 1050,
"y": 300
},
"data": {
"label": "问题回答",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.5",
"max_tokens": "2000",
"prompt": "你是一个知识渊博、乐于助人的AI助手。请回答用户的问题。\n\n用户问题{{user_input}}\n对话历史{{memory.conversation_history}}\n意图分析{{output}}\n\n请提供\n1. 直接、准确的答案\n2. 必要的解释和说明\n3. 如果问题不明确,友好地询问更多信息\n\n请以自然、易懂的方式回答长度控制在200字以内。直接输出回答内容。"
}
},
{
"id": "llm-emotion",
"type": "llm",
"position": {
"x": 1050,
"y": 400
},
"data": {
"label": "情感回应",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.8",
"max_tokens": "1000",
"prompt": "你是一个善解人意的AI助手。请根据用户的情感状态给予适当的回应。\n\n用户输入{{user_input}}\n情感状态{{output.emotion}}\n对话历史{{memory.conversation_history}}\n\n请根据用户的情感\n- 如果是积极情感:给予鼓励和共鸣\n- 如果是消极情感:给予理解、安慰和支持\n- 如果是中性情感:给予关注和陪伴\n\n请生成一个温暖、共情的回复长度控制在150字以内。直接输出回复内容。"
}
},
{
"id": "llm-request",
"type": "llm",
"position": {
"x": 1050,
"y": 500
},
"data": {
"label": "请求处理",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.4",
"max_tokens": "1500",
"prompt": "你是一个专业的AI助手。用户提出了一个请求请分析并回应。\n\n用户请求{{user_input}}\n意图分析{{output}}\n对话历史{{memory.conversation_history}}\n\n请\n1. 理解用户的请求内容\n2. 如果可以满足,说明如何满足\n3. 如果无法满足,友好地说明原因并提供替代方案\n\n请以清晰、友好的方式回应长度控制在200字以内。直接输出回复内容。"
}
},
{
"id": "llm-goodbye",
"type": "llm",
"position": {
"x": 1050,
"y": 600
},
"data": {
"label": "告别回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.6",
"max_tokens": "300",
"prompt": "你是一个友好的AI助手。用户要结束对话请给予温暖的告别。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n\n请生成一个温暖、友好的告别回复表达期待下次交流。长度控制在50字以内。直接输出回复内容。"
}
},
{
"id": "llm-general",
"type": "llm",
"position": {
"x": 1050,
"y": 700
},
"data": {
"label": "通用回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.6",
"max_tokens": "1000",
"prompt": "你是一个友好、专业的AI助手。请回应用户的输入。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n意图分析{{output}}\n\n请生成一个自然、有意义的回复保持对话的连贯性。长度控制在150字以内。直接输出回复内容。"
}
},
{
"id": "merge-response",
"type": "merge",
"position": {
"x": 1250,
"y": 400
},
"data": {
"label": "合并回复",
"mode": "merge_first",
"strategy": "object"
}
},
{
"id": "cache-update",
"type": "cache",
"position": {
"x": 1450,
"y": 400
},
"data": {
"label": "更新记忆",
"operation": "set",
"key": "user_memory_{user_id}",
"value": "{\"conversation_history\": {{memory.conversation_history}} + [{\"role\": \"user\", \"content\": \"{{user_input}}\", \"timestamp\": \"{{timestamp}}\"}, {\"role\": \"assistant\", \"content\": \"{{output}}\", \"timestamp\": \"{{timestamp}}\"}], \"user_profile\": {{memory.user_profile}}, \"context\": {{memory.context}}}",
"ttl": 86400
}
},
{
"id": "llm-format",
"type": "llm",
"position": {
"x": 1650,
"y": 400
},
"data": {
"label": "格式化回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.3",
"max_tokens": "500",
"prompt": "请将以下回复内容格式化为最终输出。确保回复自然、流畅。\n\n原始回复{{output}}\n\n请直接输出格式化后的回复内容不要包含其他说明或标记。如果原始回复已经是合适的格式直接输出即可。"
}
},
{
"id": "end-1",
"type": "end",
"position": {
"x": 1850,
"y": 400
},
"data": {
"label": "结束",
"output_format": "text"
}
}
],
"edges": [
{
"id": "e1",
"source": "start-1",
"target": "cache-query",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e2",
"source": "cache-query",
"target": "transform-merge",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e3",
"source": "transform-merge",
"target": "llm-intent",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e4",
"source": "llm-intent",
"target": "switch-intent",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e5-greeting",
"source": "switch-intent",
"target": "llm-greeting",
"sourceHandle": "greeting-handle",
"targetHandle": "left"
},
{
"id": "e5-question",
"source": "switch-intent",
"target": "llm-question",
"sourceHandle": "question-handle",
"targetHandle": "left"
},
{
"id": "e5-emotion",
"source": "switch-intent",
"target": "llm-emotion",
"sourceHandle": "emotion-handle",
"targetHandle": "left"
},
{
"id": "e5-request",
"source": "switch-intent",
"target": "llm-request",
"sourceHandle": "request-handle",
"targetHandle": "left"
},
{
"id": "e5-goodbye",
"source": "switch-intent",
"target": "llm-goodbye",
"sourceHandle": "goodbye-handle",
"targetHandle": "left"
},
{
"id": "e5-general",
"source": "switch-intent",
"target": "llm-general",
"sourceHandle": "default",
"targetHandle": "left"
},
{
"id": "e6-greeting",
"source": "llm-greeting",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-question",
"source": "llm-question",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-emotion",
"source": "llm-emotion",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-request",
"source": "llm-request",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-goodbye",
"source": "llm-goodbye",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-general",
"source": "llm-general",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e7",
"source": "merge-response",
"target": "cache-update",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e8",
"source": "cache-update",
"target": "llm-format",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e9",
"source": "llm-format",
"target": "end-1",
"sourceHandle": "right",
"targetHandle": "left"
}
]
}
}

View File

@@ -0,0 +1,337 @@
# 智能聊天Agent完整示例说明
## 📋 概述
这是一个完整的聊天智能体示例,展示了如何使用平台的核心能力构建一个功能完善的聊天助手。该示例包含了记忆管理、意图识别、多分支路由、上下文传递等核心功能。
## 🎯 功能特性
### ✅ 核心能力展示
1. **记忆管理**
- 使用缓存节点存储对话历史
- 支持用户画像和上下文信息
- 自动更新记忆内容
2. **意图识别**
- 使用LLM节点分析用户意图
- 识别情感状态
- 提取关键词和话题
3. **多分支路由**
- 使用Switch节点根据意图分发
- 支持6种不同场景的处理分支
- 默认分支处理未知意图
4. **上下文传递**
- 使用Transform节点合并数据
- 保持对话历史的连贯性
- 支持多轮对话
5. **个性化回复**
- 根据不同意图生成针对性回复
- 考虑用户情感状态
- 保持对话风格一致
## 🔄 工作流结构
```
开始节点
查询记忆Cache节点
合并上下文Transform节点
意图理解LLM节点
意图路由Switch节点
├─→ 问候处理greeting
├─→ 问题回答question
├─→ 情感回应emotion
├─→ 请求处理request
├─→ 告别回复goodbye
└─→ 通用回复default
合并回复Merge节点
更新记忆Cache节点
格式化回复LLM节点
结束节点
```
## 📊 节点详细说明
### 1. 开始节点start-1
- **功能**: 接收用户输入
- **输入格式**: JSON格式包含 `query` 字段
- **输出**: 将用户输入传递给后续节点
### 2. 查询记忆节点cache-query
- **类型**: Cache节点
- **操作**: `get` - 获取用户记忆
- **Key**: `user_memory_{user_id}`
- **默认值**: 空记忆结构
- **功能**: 从缓存中读取用户的对话历史和画像信息
### 3. 合并上下文节点transform-merge
- **类型**: Transform节点
- **模式**: `merge` - 合并模式
- **功能**: 将用户输入、记忆数据、时间戳合并为完整上下文
### 4. 意图理解节点llm-intent
- **类型**: LLM节点
- **模型**: DeepSeek Chat
- **功能**:
- 分析用户输入
- 识别意图类型greeting/question/emotion/request/goodbye/other
- 识别情感状态positive/neutral/negative
- 提取关键词和话题
- **输出格式**: JSON
```json
{
"intent": "意图类型",
"emotion": "情感状态",
"keywords": ["关键词"],
"topic": "话题主题",
"needs_response": true
}
```
### 5. 意图路由节点switch-intent
- **类型**: Switch节点
- **功能**: 根据意图类型路由到不同的处理分支
- **分支**:
- `greeting` → 问候处理
- `question` → 问题回答
- `emotion` → 情感回应
- `request` → 请求处理
- `goodbye` → 告别回复
- `default` → 通用回复
### 6. 各分支处理节点llm-*
- **类型**: LLM节点
- **功能**: 根据不同意图生成针对性的回复
- **特点**:
- 问候分支:友好、亲切
- 问题分支:准确、详细
- 情感分支:共情、温暖
- 请求分支:专业、清晰
- 告别分支:温暖、期待
- 通用分支:自然、连贯
### 7. 合并回复节点merge-response
- **类型**: Merge节点
- **模式**: `merge_first` - 合并第一个结果
- **功能**: 将各分支的回复结果合并
### 8. 更新记忆节点cache-update
- **类型**: Cache节点
- **操作**: `set` - 设置记忆
- **功能**:
- 将本次对话添加到历史记录
- 更新用户画像(如需要)
- 保存上下文信息
- **TTL**: 86400秒24小时
### 9. 格式化回复节点llm-format
- **类型**: LLM节点
- **功能**: 对最终回复进行格式化和优化
- **输出**: 自然、流畅的文本回复
### 10. 结束节点end-1
- **功能**: 返回最终回复
- **输出格式**: 纯文本
## 🚀 使用方法
### 方法一:使用生成脚本(推荐)
```bash
cd backend/scripts
python3 generate_chat_agent.py
```
脚本会自动创建Agent包含完整的工作流配置。
### 方法二:手动创建
1. **进入Agent管理页面**
- 点击"创建Agent"按钮
- 填写名称和描述
2. **进入工作流编辑器**
- 点击"设计"按钮
- 使用节点工具箱添加节点
- 按照工作流结构连接节点
3. **配置节点**
- **LLM节点**: 配置API密钥、模型、Prompt
- **Cache节点**: 配置缓存Key和操作
- **Switch节点**: 配置路由规则
- **Transform节点**: 配置数据映射
4. **测试Agent**
- 点击"测试"按钮
- 输入测试消息
- 查看执行结果
5. **发布Agent**
- 点击"发布"按钮
- Agent状态变为"已发布"
- 可以开始使用
## 📝 配置要点
### 1. LLM节点配置
- **Provider**: 选择AI模型提供商如DeepSeek、OpenAI
- **Model**: 选择具体模型如deepseek-chat、gpt-3.5-turbo
- **Temperature**:
- 意图识别0.3(需要准确性)
- 情感回应0.8(需要创造性)
- 问题回答0.5(平衡准确性和灵活性)
- **Max Tokens**: 根据回复长度需求设置
- **Prompt**: 明确角色、任务、输出格式要求
### 2. Cache节点配置
- **Operation**:
- `get`: 查询记忆
- `set`: 更新记忆
- **Key**: 使用用户ID确保记忆隔离
- **TTL**: 设置合适的过期时间如24小时
### 3. Switch节点配置
- **Field**: 指定用于路由的字段(如`intent`
- **Cases**: 配置各分支的路由规则
- **Default**: 配置默认分支
### 4. Transform节点配置
- **Mode**: 选择合并模式(`merge`
- **Mapping**: 配置数据映射规则
- **变量引用**: 使用`{{variable}}`引用上游数据
## 🎨 自定义扩展
### 1. 添加新的意图分支
1. 在Switch节点中添加新的case
2. 创建对应的LLM处理节点
3. 连接Switch节点和处理节点
4. 连接处理节点到Merge节点
### 2. 增强记忆功能
- 添加用户画像更新逻辑
- 实现长期记忆和短期记忆分离
- 添加记忆检索和总结功能
### 3. 添加外部工具
- 集成知识库查询
- 添加天气、新闻等外部API
- 实现文件处理功能
### 4. 优化回复质量
- 添加回复质量评估节点
- 实现多候选回复生成和选择
- 添加回复风格控制
## 🔍 测试示例
### 测试用例1问候
```
输入: "你好"
预期: 友好的问候回复
```
### 测试用例2问题
```
输入: "今天天气怎么样?"
预期: 尝试回答问题或说明无法获取天气信息
```
### 测试用例3情感表达
```
输入: "我今天心情不太好"
预期: 共情、安慰的回复
```
### 测试用例4请求
```
输入: "帮我写一首诗"
预期: 生成诗歌或说明能力范围
```
### 测试用例5告别
```
输入: "再见"
预期: 温暖的告别回复
```
## ⚠️ 注意事项
1. **API密钥配置**
- 确保所有LLM节点都配置了有效的API密钥
- 检查API配额和限制
2. **记忆管理**
   - Cache节点在Redis不可用时会回退到内存缓存此时记忆仅保存在进程内重启后会丢失
- 生产环境建议使用Redis等持久化缓存
3. **性能优化**
- 减少不必要的LLM调用
- 优化Prompt长度
- 合理设置Token限制
4. **错误处理**
- 添加错误处理节点
- 配置重试机制
- 提供友好的错误提示
## 📚 相关文档
- [创建Agent经验总结](./创建Agent经验.md)
- [工作流节点类型说明](./可新增节点类型建议.md)
- [Agent使用说明](./Agent使用说明.md)
## 🎯 适用场景
- ✅ 情感陪聊助手
- ✅ 客服机器人
- ✅ 智能问答系统
- ✅ 对话式AI应用
- ✅ 个性化聊天助手
## 💡 最佳实践
1. **Prompt设计**
- 明确角色定位
- 明确输出格式
- 提供示例和上下文
2. **工作流设计**
- 保持流程清晰
- 合理使用分支
- 避免过度复杂
3. **记忆管理**
- 定期清理过期记忆
- 控制记忆大小
- 保护用户隐私
4. **测试验证**
- 覆盖各种场景
- 测试边界情况
- 验证回复质量
---
**创建时间**: 2024年
**版本**: 1.0
**作者**: AI Agent Platform