{
"name": "智能聊天助手(完整示例)",
"description": "一个完整的聊天智能体示例,展示平台的核心能力:\n- ✅ 记忆管理:使用缓存节点存储和查询对话历史\n- ✅ 意图识别使用LLM节点分析用户意图\n- ✅ 多分支路由使用Switch节点根据意图分发到不同处理分支\n- ✅ 上下文传递使用Transform节点合并数据\n- ✅ 多轮对话:支持上下文记忆和连贯对话\n- ✅ 个性化回复:根据不同意图生成针对性回复\n\n适用场景情感陪聊、客服助手、智能问答等聊天场景。",
"workflow_config": {
"nodes": [
{
"id": "start-1",
"type": "start",
"position": {
"x": 50,
"y": 400
},
"data": {
"label": "开始",
"output_format": "json"
}
},
{
"id": "cache-query",
"type": "cache",
"position": {
"x": 250,
"y": 400
},
"data": {
"label": "查询记忆",
"operation": "get",
"key": "user_memory_{user_id}",
"default_value": "{\"conversation_history\": [], \"user_profile\": {}, \"context\": {}}"
}
},
{
"id": "transform-merge",
"type": "transform",
"position": {
"x": 450,
"y": 400
},
"data": {
"label": "合并上下文",
"mode": "merge",
"mapping": {
"user_input": "{{query}}",
"memory": "{{output}}",
"timestamp": "{{timestamp}}"
}
}
},
{
"id": "llm-intent",
"type": "llm",
"position": {
"x": 650,
"y": 400
},
"data": {
"label": "意图理解",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.3",
"max_tokens": "1000",
"prompt": "你是一个专业的对话意图分析助手。请分析用户的输入,识别用户的意图和情感。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n用户画像{{memory.user_profile}}\n\n请以JSON格式输出分析结果\n{\n \"intent\": \"意图类型greeting/question/emotion/request/goodbye/other\",\n \"emotion\": \"情感状态positive/neutral/negative\",\n \"keywords\": [\"关键词1\", \"关键词2\"],\n \"topic\": \"话题主题\",\n \"needs_response\": true\n}\n\n请确保输出是有效的JSON格式不要包含其他文字。"
}
},
{
"id": "switch-intent",
"type": "switch",
"position": {
"x": 850,
"y": 400
},
"data": {
"label": "意图路由",
"field": "intent",
"cases": {
"greeting": "greeting-handle",
"question": "question-handle",
"emotion": "emotion-handle",
"request": "request-handle",
"goodbye": "goodbye-handle"
},
"default": "general-handle"
}
},
{
"id": "llm-greeting",
"type": "llm",
"position": {
"x": 1050,
"y": 200
},
"data": {
"label": "问候回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.7",
"max_tokens": "500",
"prompt": "你是一个温暖、友好的AI助手。用户向你打招呼请用自然、亲切的方式回应。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n\n请生成一个友好、自然的问候回复长度控制在50字以内。直接输出回复内容不要包含其他说明。"
}
},
{
"id": "llm-question",
"type": "llm",
"position": {
"x": 1050,
"y": 300
},
"data": {
"label": "问题回答",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.5",
"max_tokens": "2000",
"prompt": "你是一个知识渊博、乐于助人的AI助手。请回答用户的问题。\n\n用户问题{{user_input}}\n对话历史{{memory.conversation_history}}\n意图分析{{output}}\n\n请提供\n1. 直接、准确的答案\n2. 必要的解释和说明\n3. 如果问题不明确,友好地询问更多信息\n\n请以自然、易懂的方式回答长度控制在200字以内。直接输出回答内容。"
}
},
{
"id": "llm-emotion",
"type": "llm",
"position": {
"x": 1050,
"y": 400
},
"data": {
"label": "情感回应",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.8",
"max_tokens": "1000",
"prompt": "你是一个善解人意的AI助手。请根据用户的情感状态给予适当的回应。\n\n用户输入{{user_input}}\n情感状态{{output.emotion}}\n对话历史{{memory.conversation_history}}\n\n请根据用户的情感\n- 如果是积极情感:给予鼓励和共鸣\n- 如果是消极情感:给予理解、安慰和支持\n- 如果是中性情感:给予关注和陪伴\n\n请生成一个温暖、共情的回复长度控制在150字以内。直接输出回复内容。"
}
},
{
"id": "llm-request",
"type": "llm",
"position": {
"x": 1050,
"y": 500
},
"data": {
"label": "请求处理",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.4",
"max_tokens": "1500",
"prompt": "你是一个专业的AI助手。用户提出了一个请求请分析并回应。\n\n用户请求{{user_input}}\n意图分析{{output}}\n对话历史{{memory.conversation_history}}\n\n请\n1. 理解用户的请求内容\n2. 如果可以满足,说明如何满足\n3. 如果无法满足,友好地说明原因并提供替代方案\n\n请以清晰、友好的方式回应长度控制在200字以内。直接输出回复内容。"
}
},
{
"id": "llm-goodbye",
"type": "llm",
"position": {
"x": 1050,
"y": 600
},
"data": {
"label": "告别回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.6",
"max_tokens": "300",
"prompt": "你是一个友好的AI助手。用户要结束对话请给予温暖的告别。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n\n请生成一个温暖、友好的告别回复表达期待下次交流。长度控制在50字以内。直接输出回复内容。"
}
},
{
"id": "llm-general",
"type": "llm",
"position": {
"x": 1050,
"y": 700
},
"data": {
"label": "通用回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.6",
"max_tokens": "1000",
"prompt": "你是一个友好、专业的AI助手。请回应用户的输入。\n\n用户输入{{user_input}}\n对话历史{{memory.conversation_history}}\n意图分析{{output}}\n\n请生成一个自然、有意义的回复保持对话的连贯性。长度控制在150字以内。直接输出回复内容。"
}
},
{
"id": "merge-response",
"type": "merge",
"position": {
"x": 1250,
"y": 400
},
"data": {
"label": "合并回复",
"mode": "merge_first",
"strategy": "object"
}
},
{
"id": "cache-update",
"type": "cache",
"position": {
"x": 1450,
"y": 400
},
"data": {
"label": "更新记忆",
"operation": "set",
"key": "user_memory_{user_id}",
"value": "{\"conversation_history\": {{memory.conversation_history}} + [{\"role\": \"user\", \"content\": \"{{user_input}}\", \"timestamp\": \"{{timestamp}}\"}, {\"role\": \"assistant\", \"content\": \"{{output}}\", \"timestamp\": \"{{timestamp}}\"}], \"user_profile\": {{memory.user_profile}}, \"context\": {{memory.context}}}",
"ttl": 86400
}
},
{
"id": "llm-format",
"type": "llm",
"position": {
"x": 1650,
"y": 400
},
"data": {
"label": "格式化回复",
"provider": "deepseek",
"model": "deepseek-chat",
"temperature": "0.3",
"max_tokens": "500",
"prompt": "请将以下回复内容格式化为最终输出。确保回复自然、流畅。\n\n原始回复{{output}}\n\n请直接输出格式化后的回复内容不要包含其他说明或标记。如果原始回复已经是合适的格式直接输出即可。"
}
},
{
"id": "end-1",
"type": "end",
"position": {
"x": 1850,
"y": 400
},
"data": {
"label": "结束",
"output_format": "text"
}
}
],
"edges": [
{
"id": "e1",
"source": "start-1",
"target": "cache-query",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e2",
"source": "cache-query",
"target": "transform-merge",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e3",
"source": "transform-merge",
"target": "llm-intent",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e4",
"source": "llm-intent",
"target": "switch-intent",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e5-greeting",
"source": "switch-intent",
"target": "llm-greeting",
"sourceHandle": "greeting-handle",
"targetHandle": "left"
},
{
"id": "e5-question",
"source": "switch-intent",
"target": "llm-question",
"sourceHandle": "question-handle",
"targetHandle": "left"
},
{
"id": "e5-emotion",
"source": "switch-intent",
"target": "llm-emotion",
"sourceHandle": "emotion-handle",
"targetHandle": "left"
},
{
"id": "e5-request",
"source": "switch-intent",
"target": "llm-request",
"sourceHandle": "request-handle",
"targetHandle": "left"
},
{
"id": "e5-goodbye",
"source": "switch-intent",
"target": "llm-goodbye",
"sourceHandle": "goodbye-handle",
"targetHandle": "left"
},
{
"id": "e5-general",
"source": "switch-intent",
"target": "llm-general",
"sourceHandle": "default",
"targetHandle": "left"
},
{
"id": "e6-greeting",
"source": "llm-greeting",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-question",
"source": "llm-question",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-emotion",
"source": "llm-emotion",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-request",
"source": "llm-request",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-goodbye",
"source": "llm-goodbye",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e6-general",
"source": "llm-general",
"target": "merge-response",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e7",
"source": "merge-response",
"target": "cache-update",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e8",
"source": "cache-update",
"target": "llm-format",
"sourceHandle": "right",
"targetHandle": "left"
},
{
"id": "e9",
"source": "llm-format",
"target": "end-1",
"sourceHandle": "right",
"targetHandle": "left"
}
]
}
}