544 lines
17 KiB
Python
544 lines
17 KiB
Python
#!/usr/bin/env python3
|
||
"""
|
||
生成智能聊天Agent示例
|
||
展示如何使用平台能力构建一个完整的聊天智能体,包含:
|
||
- 记忆管理(缓存节点)
|
||
- 意图识别(LLM节点)
|
||
- 多分支路由(Switch节点)
|
||
- 上下文传递(Transform节点)
|
||
- 多轮对话支持
|
||
"""
|
||
import sys
|
||
import os
|
||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||
|
||
from sqlalchemy.orm import Session
|
||
from app.core.database import SessionLocal
|
||
from app.models.agent import Agent
|
||
from app.models.user import User
|
||
from datetime import datetime
|
||
import uuid
|
||
|
||
|
||
def _node(node_id: str, node_type: str, x: int, y: int, data: dict) -> dict:
    """Build one canvas node in the workflow-editor schema.

    Args:
        node_id: Unique node id; edges reference nodes by this value.
        node_type: Platform node type ("start", "cache", "llm", "switch", ...).
        x: Canvas x position.
        y: Canvas y position.
        data: Node-type-specific configuration payload.
    """
    return {
        "id": node_id,
        "type": node_type,
        "position": {"x": x, "y": y},
        "data": data,
    }


def _llm_node(node_id: str, x: int, y: int, label: str,
              temperature: str, max_tokens: str, prompt: str) -> dict:
    """Build a DeepSeek chat LLM node.

    `temperature` and `max_tokens` are passed as strings because the node
    editor stores all form fields as text (matches the original config).
    """
    return _node(node_id, "llm", x, y, {
        "label": label,
        "provider": "deepseek",
        "model": "deepseek-chat",
        "temperature": temperature,
        "max_tokens": max_tokens,
        "prompt": prompt,
    })


def _edge(edge_id: str, source: str, target: str,
          source_handle: str = "right", target_handle: str = "left") -> dict:
    """Build one edge; handles default to the common right->left wiring."""
    return {
        "id": edge_id,
        "source": source,
        "target": target,
        "sourceHandle": source_handle,
        "targetHandle": target_handle,
    }


# NOTE: annotations are string forward references so this module's workflow
# builder can be imported/executed without the SQLAlchemy model classes
# being resolvable at def time; runtime behavior is unchanged (PEP 484).
def generate_chat_agent(db: "Session", user: "User"):
    """Build the full sample chat-agent workflow definition.

    Demonstrates the platform's core capabilities: memory via cache nodes,
    intent detection via an LLM node, multi-branch routing via a switch
    node, context merging via a transform node, and multi-turn dialogue.

    Args:
        db: Database session. Currently unused; kept for interface parity
            with the other generators so callers need not special-case this.
        user: Owner of the agent. Currently unused (see above).

    Returns:
        dict with keys "name", "description" and "workflow_config"
        (the latter holding the "nodes" and "edges" lists).
    """
    # --- Nodes -----------------------------------------------------------
    # Main pipeline: start -> query memory -> merge context -> intent LLM
    # -> switch -> (6 intent branches) -> merge -> update memory
    # -> extract answer -> end.
    nodes = [
        # 1. Entry point.
        _node("start-1", "start", 50, 400, {
            "label": "开始",
            "output_format": "json",
        }),
        # 2. Read per-user conversation memory; falls back to an empty
        #    memory document on a cache miss.
        _node("cache-query", "cache", 250, 400, {
            "label": "查询记忆",
            "operation": "get",
            "key": "user_memory_{user_id}",
            "default_value": '{"conversation_history": [], "user_profile": {}, "context": {}}',
        }),
        # 3. Merge the fresh user input with the recalled memory so every
        #    downstream prompt can template both.
        _node("transform-merge", "transform", 450, 400, {
            "label": "合并上下文",
            "mode": "merge",
            "mapping": {
                "user_input": "{{query}}",
                "memory": "{{output}}",
                "timestamp": "{{timestamp}}",
            },
        }),
        # 4. Intent/emotion classification; low temperature for stable JSON.
        _llm_node("llm-intent", 650, 400, "意图理解", "0.3", "200",
                  """你是一个专业的对话意图分析助手。请分析用户的输入,识别用户的意图和情感。

用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
用户画像:{{memory.user_profile}}

请以JSON格式输出分析结果:
{
    "intent": "意图类型(greeting/question/emotion/request/goodbye/other)",
    "emotion": "情感状态(positive/neutral/negative)",
    "keywords": ["关键词1", "关键词2"],
    "topic": "话题主题",
    "needs_response": true
}

请确保输出是有效的JSON格式,不要包含其他文字。"""),
        # 5. Route on the classified intent; each case maps to a switch
        #    output handle, unknown intents fall through to "default".
        _node("switch-intent", "switch", 850, 400, {
            "label": "意图路由",
            "field": "intent",
            "cases": {
                "greeting": "greeting-handle",
                "question": "question-handle",
                "emotion": "emotion-handle",
                "request": "request-handle",
                "goodbye": "goodbye-handle",
            },
            "default": "general-handle",
        }),
        # 6.-11. One response-generation branch per intent. Temperature and
        # token budget vary per branch (e.g. warmer for emotion/greeting,
        # tighter for factual answers).
        _llm_node("llm-greeting", 1050, 200, "问候回复", "0.7", "200",
                  """你是一个温暖、友好的AI助手。用户向你打招呼,请用自然、亲切的方式回应。

用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}

请生成一个友好、自然的问候回复,长度控制在50字以内。直接输出回复内容,不要包含其他说明。"""),
        _llm_node("llm-question", 1050, 300, "问题回答", "0.5", "500",
                  """你是一个知识渊博、乐于助人的AI助手。请简洁、准确地回答用户的问题。

用户问题:{{user_input}}
对话历史:{{memory.conversation_history}}
意图分析:{{output}}

回答要求:
1. 直接给出核心答案,避免冗长描述
2. 如果是介绍类问题(如"你能做什么"),用简洁的要点列举,控制在100字以内
3. 如果是知识性问题,提供准确答案和简要说明,控制在150字以内
4. 如果问题不明确,友好地询问更多信息,控制在50字以内

请以自然、简洁的方式回答,避免重复和冗余。直接输出回答内容,无需额外格式化。"""),
        _llm_node("llm-emotion", 1050, 400, "情感回应", "0.8", "500",
                  """你是一个善解人意的AI助手。请根据用户的情感状态,给予适当的回应。

用户输入:{{user_input}}
情感状态:{{output.emotion}}
对话历史:{{memory.conversation_history}}

请根据用户的情感:
- 如果是积极情感:给予鼓励和共鸣
- 如果是消极情感:给予理解、安慰和支持
- 如果是中性情感:给予关注和陪伴

请生成一个温暖、共情的回复,长度控制在150字以内。直接输出回复内容。"""),
        _llm_node("llm-request", 1050, 500, "请求处理", "0.4", "800",
                  """你是一个专业的AI助手。用户提出了一个请求,请分析并回应。

用户请求:{{user_input}}
意图分析:{{output}}
对话历史:{{memory.conversation_history}}

请:
1. 理解用户的请求内容
2. 如果可以满足,说明如何满足
3. 如果无法满足,友好地说明原因并提供替代方案

请以清晰、友好的方式回应,长度控制在200字以内。直接输出回复内容。"""),
        _llm_node("llm-goodbye", 1050, 600, "告别回复", "0.6", "150",
                  """你是一个友好的AI助手。用户要结束对话,请给予温暖的告别。

用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}

请生成一个温暖、友好的告别回复,表达期待下次交流。长度控制在50字以内。直接输出回复内容。"""),
        _llm_node("llm-general", 1050, 700, "通用回复", "0.6", "500",
                  """你是一个友好、专业的AI助手。请回应用户的输入。

用户输入:{{user_input}}
对话历史:{{memory.conversation_history}}
意图分析:{{output}}

请生成一个自然、有意义的回复,保持对话的连贯性。长度控制在150字以内。直接输出回复内容。"""),
        # 12. Fan-in: only one branch fires per turn; take its output.
        _node("merge-response", "merge", 1250, 400, {
            "label": "合并回复",
            "mode": "merge_first",
            "strategy": "object",
        }),
        # 13. Append the user/assistant turn to memory; TTL 86400s = 1 day.
        #    The value is a platform template string, not evaluated Python.
        _node("cache-update", "cache", 1450, 400, {
            "label": "更新记忆",
            "operation": "set",
            "key": "user_memory_{user_id}",
            "value": '{"conversation_history": ({{memory.conversation_history}} + [{"role": "user", "content": "{{user_input}}", "timestamp": "{{timestamp}}"}, {"role": "assistant", "content": "{{output}}", "timestamp": "{{timestamp}}"}]), "user_profile": {{memory.user_profile}}, "context": {{memory.context}}}',
            "ttl": 86400,
        }),
        # 14. Pull the plain answer text back out of the nested payload.
        #    NOTE(review): the "right.right.right" path mirrors the handle
        #    nesting produced upstream — confirm against the JSON node docs.
        _node("json-extract", "json", 1650, 400, {
            "label": "提取回答",
            "operation": "extract",
            "path": "right.right.right",
        }),
        # 15. Exit, emitting plain text to the caller.
        _node("end-1", "end", 1850, 400, {
            "label": "结束",
            "output_format": "text",
        }),
    ]

    # --- Edges -----------------------------------------------------------
    # Linear trunk up to the switch.
    edges = [
        _edge("e1", "start-1", "cache-query"),
        _edge("e2", "cache-query", "transform-merge"),
        _edge("e3", "transform-merge", "llm-intent"),
        _edge("e4", "llm-intent", "switch-intent"),
    ]

    # Switch fan-out (one edge per intent, leaving via that case's handle)
    # followed by fan-in of every branch into the merge node.
    branches = [
        ("greeting", "llm-greeting", "greeting-handle"),
        ("question", "llm-question", "question-handle"),
        ("emotion", "llm-emotion", "emotion-handle"),
        ("request", "llm-request", "request-handle"),
        ("goodbye", "llm-goodbye", "goodbye-handle"),
        ("general", "llm-general", "default"),
    ]
    for branch_name, branch_node, handle in branches:
        edges.append(_edge(f"e5-{branch_name}", "switch-intent", branch_node,
                           source_handle=handle))
    for branch_name, branch_node, _handle in branches:
        edges.append(_edge(f"e6-{branch_name}", branch_node, "merge-response"))

    # Tail: merge -> persist memory -> extract answer -> end.
    edges.append(_edge("e7", "merge-response", "cache-update"))
    edges.append(_edge("e8", "cache-update", "json-extract"))
    edges.append(_edge("e9", "json-extract", "end-1"))

    return {
        "name": "智能聊天助手(完整示例)",
        "description": """一个完整的聊天智能体示例,展示平台的核心能力:
- ✅ 记忆管理:使用缓存节点存储和查询对话历史
- ✅ 意图识别:使用LLM节点分析用户意图
- ✅ 多分支路由:使用Switch节点根据意图分发到不同处理分支
- ✅ 上下文传递:使用Transform节点合并数据
- ✅ 多轮对话:支持上下文记忆和连贯对话
- ✅ 个性化回复:根据不同意图生成针对性回复

适用场景:情感陪聊、客服助手、智能问答等聊天场景。""",
        "workflow_config": {"nodes": nodes, "edges": edges},
    }
|
||
|
||
|
||
def main():
    """Generate the sample chat agent and persist it for the admin user.

    Idempotent: creation is skipped when an agent with the same name
    already exists for the user. Progress and usage hints are printed to
    stdout; on failure the transaction is rolled back and the traceback
    printed. The session is always closed.
    """
    db = SessionLocal()
    try:
        # The sample agent is attached to the seeded "admin" account.
        user = db.query(User).filter(User.username == "admin").first()
        if not user:
            print("请先创建admin用户")
            return

        agent_data = generate_chat_agent(db, user)

        # Skip creation if this user already owns an identically named agent.
        existing = db.query(Agent).filter(
            Agent.name == agent_data["name"],
            Agent.user_id == user.id
        ).first()
        if existing:
            print(f"Agent '{agent_data['name']}' 已存在,跳过创建")
            return

        agent = Agent(
            name=agent_data["name"],
            description=agent_data["description"],
            workflow_config=agent_data["workflow_config"],
            user_id=user.id,
            status="draft"  # created unpublished; user publishes from the UI
        )
        db.add(agent)
        db.commit()
        db.refresh(agent)  # pull back server-generated fields (e.g. agent.id)

        print(f"✅ 成功创建Agent: {agent.name} (ID: {agent.id})")
        print(f" 节点数量: {len(agent_data['workflow_config']['nodes'])}")
        print(f" 连接数量: {len(agent_data['workflow_config']['edges'])}")
        # Plain strings below: no placeholders, so no f-prefix (ruff F541).
        print("\n📝 使用说明:")
        print(f" 1. 在Agent管理页面找到 '{agent.name}'")
        print(" 2. 点击'设计'按钮进入工作流编辑器")
        print(" 3. 配置LLM节点的API密钥(如需要)")
        print(" 4. 点击'发布'按钮发布Agent")
        print(" 5. 点击'使用'按钮测试对话功能")

    except Exception as e:
        # Broad catch is deliberate at this script-level boundary: report,
        # dump the traceback, and leave the DB clean.
        print(f"❌ 创建Agent失败: {str(e)}")
        import traceback
        traceback.print_exc()
        db.rollback()
    finally:
        db.close()
|
||
|
||
|
||
# Script entry point: generate and persist the sample agent when run directly.
if __name__ == "__main__":
    main()
|