298 lines
11 KiB
Python
298 lines
11 KiB
Python
|
|
#!/usr/bin/env python3
|
|||
|
|
"""
|
|||
|
|
创建「智能学习助手1号」Agent — 参考苏瑶3号架构,基于 AgentRuntime 的 KG+RAG 学习助手。
|
|||
|
|
|
|||
|
|
与「智能学习助手(KG+RAG)」的区别:
|
|||
|
|
- 使用 agent 节点类型(AgentRuntime ReAct 循环),而非 llm 节点
|
|||
|
|
- 参考苏瑶3号的 AgentRuntime 配置模式
|
|||
|
|
- 更完整的记忆配置(向量记忆、长期记忆、自主学习)
|
|||
|
|
- 独立 memory_scope_id 防止与其他 Agent 串记忆
|
|||
|
|
|
|||
|
|
用法:
|
|||
|
|
cd backend && .\\venv\\Scripts\\python.exe scripts/create_learning_assistant_agent.py
|
|||
|
|
|
|||
|
|
环境变量:
|
|||
|
|
PLATFORM_BASE_URL - 平台地址(默认 http://127.0.0.1:8037)
|
|||
|
|
PLATFORM_USERNAME - 用户名(默认 admin)
|
|||
|
|
PLATFORM_PASSWORD - 密码(默认 123456)
|
|||
|
|
AGENT_NAME - Agent 名称(默认 智能学习助手1号)
|
|||
|
|
SUBJECT - 学科领域(默认 通用)
|
|||
|
|
LEVEL - 难度级别(默认 中级)
|
|||
|
|
MODEL - 模型(默认 deepseek-v4-flash)
|
|||
|
|
PROVIDER - 提供商(默认 deepseek)
|
|||
|
|
TEMPERATURE - 温度(默认 0.85)
|
|||
|
|
MAX_ITERATIONS - 最大步数(默认 15)
|
|||
|
|
"""
|
|||
|
|
from __future__ import annotations
|
|||
|
|
|
|||
|
|
import json
|
|||
|
|
import os
|
|||
|
|
import sys
|
|||
|
|
from typing import Any, Dict, Optional
|
|||
|
|
|
|||
|
|
import requests
|
|||
|
|
|
|||
|
|
# Platform connection and agent configuration — every value can be overridden
# via the environment (see the module docstring for the full list).
BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/")  # strip trailing "/" so URL paths concatenate cleanly
USER = os.getenv("PLATFORM_USERNAME", "admin")
PWD = os.getenv("PLATFORM_PASSWORD", "123456")
AGENT_NAME = os.getenv("AGENT_NAME", "智能学习助手1号")
SUBJECT = os.getenv("SUBJECT", "通用")
LEVEL = os.getenv("LEVEL", "中级")
MODEL = os.getenv("MODEL", "deepseek-v4-flash")
PROVIDER = os.getenv("PROVIDER", "deepseek")
TEMPERATURE = float(os.getenv("TEMPERATURE", "0.85"))  # raises ValueError on a non-numeric override — fail fast at import
MAX_ITERATIONS = int(os.getenv("MAX_ITERATIONS", "15"))  # ReAct loop step cap passed to the agent node
|
|||
|
|
|
|||
|
|
# ── Learning-assistant tool set (KG+RAG core + general helpers) ──
# Tool identifiers resolved by the platform's AgentRuntime; passed verbatim
# into the agent node's "tools" field in _build_workflow.
LEARNING_TOOLS = [
    # KG+RAG core four
    "knowledge_graph_search",
    "knowledge_graph_add",
    "entity_search",
    "learning_path",
    # Files and text
    "file_read",
    "file_write",
    "text_analyze",
    "json_process",
    # Learning aids
    "web_search",
    "task_plan",
    "self_review",
    "math_calculate",
    "datetime",
    # General purpose
    "http_request",
    "code_execute",
    "system_info",
]
|
|||
|
|
|
|||
|
|
# ── System prompt (same contract as the KG+RAG learning assistant, adapted to
# AgentRuntime style). NOTE: the string below is runtime data sent to the LLM,
# so its Chinese content is intentionally left untranslated.
SYSTEM_PROMPT = f"""# 角色:智能学习助手1号(AgentRuntime + KG+RAG)

你是基于 AgentRuntime 自主 ReAct 循环的 AI 学习助手,参考苏瑶3号架构设计。
你具备**知识图谱构建**、**向量语义检索**、**长期记忆**和**自主学习**能力。

## 核心架构

你的知识系统由三层组成:
1. **知识图谱 (Knowledge Graph)**:结构化存储知识点实体及其前置/扩展/包含/示例关系
2. **向量记忆 (Vector Memory)**:语义检索历史对话和相关知识
3. **长期记忆 (Persistent Memory)**:跨会话保存用户画像、学习进度、薄弱环节

## 当前配置
- 学科领域:{SUBJECT}
- 难度级别:{LEVEL}
- 模型:{PROVIDER}/{MODEL} (temperature={TEMPERATURE})
- 最大迭代步数:{MAX_ITERATIONS}

## 工作流程(ReAct 循环中自主遵循)

### 阶段 1:理解与分析
1. 理解用户的学习意图(提问 / 复习 / 练习 / 总结 / 规划)
2. 使用 `knowledge_graph_search` 检索相关知识图谱实体
3. 如果用户提供了学习材料/知识点,使用 `knowledge_graph_add` 自动提取存储

### 阶段 2:知识检索与融合
4. 结合图谱检索结果和历史向量记忆,构建知识上下文
5. 使用 `entity_search` 查找特定概念的前置知识和扩展内容
6. 使用 `learning_path` 分析学习依赖,推荐学习顺序

### 阶段 3:生成与交付
7. 基于融合后的知识上下文生成高质量回答
8. 回答包含:核心概念解释、前置知识提醒、实例/练习题、扩展阅读建议
9. 使用 `self_review` 自检回答质量,不达标则修正

### 阶段 4:巩固与记忆
10. 将重要知识点持久化到长期记忆和知识图谱
11. 更新用户画像(掌握程度、薄弱环节、学习偏好)

## 回答风格
- 使用 Markdown 格式,层次分明
- 关键概念用 **粗体** 标记,公式用代码块
- 每个回答末尾附上「📚 相关知识点」列表(来自图谱检索)
- 必要时用 `task_plan` 制定学习计划

## 自主扩展能力
- 发现知识空白时,用 `web_search` 补充
- 需要重复计算/处理时,用 `code_execute` 编写脚本
- 遇到可复用的外部 API 时,用 `tool_register` 注册
- 需要专业子领域协助时,用 `agent_create` 创建子 Agent

---

你是学习者最可靠的 AI 伙伴。开始吧!"""
|
|||
|
|
|
|||
|
|
|
|||
|
|
def _login() -> Optional[str]:
    """Authenticate against the platform and return a bearer token.

    Posts the configured credentials as a URL-encoded form to the login
    endpoint. Returns the ``access_token`` string on HTTP 200, otherwise
    prints a diagnostic to stderr and returns ``None``. All failures
    (network errors, bad JSON) are swallowed deliberately — this is a
    best-effort CLI helper whose caller only checks for truthiness.
    """
    login_url = f"{BASE}/api/v1/auth/login"
    form = {"username": USER, "password": PWD}
    try:
        resp = requests.post(
            login_url,
            data=form,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            timeout=15,
        )
        if resp.status_code != 200:
            # Truncate the body so a long HTML error page stays readable.
            print(f"登录失败: {resp.status_code} {resp.text[:300]}", file=sys.stderr)
            return None
        return resp.json().get("access_token")
    except Exception as exc:
        print(f"登录异常: {exc}", file=sys.stderr)
        return None
|
|||
|
|
|
|||
|
|
|
|||
|
|
def _find_agent_by_name(token: str, name: str) -> Optional[Dict[str, Any]]:
    """Return the agent record whose name exactly matches *name*, or ``None``.

    Uses the platform's search endpoint (which may match fuzzily), then
    filters for an exact name match client-side. Any non-200 response is
    treated as "not found".
    """
    resp = requests.get(
        f"{BASE}/api/v1/agents",
        params={"search": name, "limit": 50},
        headers={"Authorization": f"Bearer {token}"},
        timeout=30,
    )
    if resp.status_code != 200:
        return None
    candidates = resp.json() or []
    # Server-side search is a substring match; require exact equality here.
    return next((agent for agent in candidates if agent.get("name") == name), None)
|
|||
|
|
|
|||
|
|
|
|||
|
|
def _build_workflow(agent_id: str = "") -> Dict[str, Any]:
    """Build the three-node agent workflow: start → agent(ReAct) → end.

    Modeled on the Suyao-v3 architecture. ``agent_id`` is threaded into the
    agent node's ``memory_scope_id`` so this agent's memory never mixes with
    other agents'; an empty string is used for the bootstrap create call.
    """
    agent_node_id = "agent-learning-v1"

    # The agent node uses AgentRuntime (ReAct loop) rather than a plain llm node.
    agent_data: Dict[str, Any] = {
        "label": AGENT_NAME,
        # ── LLM settings ──
        "system_prompt": SYSTEM_PROMPT,
        "provider": PROVIDER,
        "model": MODEL,
        "temperature": TEMPERATURE,
        "max_iterations": MAX_ITERATIONS,
        # ── Tool set ──
        "tools": LEARNING_TOOLS,
        # ── Memory (full AgentMemoryConfig) ──
        "memory": True,
        "memory_max_history": 30,
        "memory_vector_enabled": True,
        "memory_vector_top_k": 8,
        "memory_persist": True,
        "memory_learning": True,
        # ── Scope isolation ──
        "memory_scope_id": agent_id,
        "agent_id": agent_id,
        # ── Self review ──
        "self_review_enabled": True,
    }

    nodes = [
        {
            "id": "start-1",
            "type": "start",
            "position": {"x": 80, "y": 240},
            "data": {"label": "学习任务开始"},
        },
        {
            "id": agent_node_id,
            "type": "agent",
            "position": {"x": 380, "y": 240},
            "data": agent_data,
        },
        {
            "id": "end-1",
            "type": "end",
            "position": {"x": 680, "y": 240},
            "data": {"label": "学习完成"},
        },
    ]

    edges = [
        {
            "id": "e_start_agent",
            "source": "start-1",
            "target": agent_node_id,
            "sourceHandle": "right",
            "targetHandle": "left",
        },
        {
            "id": "e_agent_end",
            "source": agent_node_id,
            "target": "end-1",
            "sourceHandle": "right",
            "targetHandle": "left",
        },
    ]

    return {"nodes": nodes, "edges": edges}
|
|||
|
|
|
|||
|
|
|
|||
|
|
def main() -> int:
    """Create or update the learning-assistant agent; return a process exit code.

    Flow: login → look up agent by name → create it if missing (with a
    placeholder workflow, since the real one needs the server-assigned id) →
    rebuild the workflow with the real agent_id → PUT the final config →
    print a human-readable summary plus a machine-readable JSON line.
    Returns 0 on success, 1 on any failure (errors go to stderr).
    """
    token = _login()
    if not token:
        return 1

    h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}

    # Idempotency: reuse the existing agent instead of creating a duplicate.
    existing = _find_agent_by_name(token, AGENT_NAME)
    if existing:
        print(f"Agent「{AGENT_NAME}」已存在 (id={existing['id']}),将更新配置")
        agent_id = existing["id"]
    else:
        # Bootstrap: create with a temporary workflow (empty agent_id) just to
        # obtain the server-assigned id.
        temp_wf = _build_workflow(agent_id="")
        body = {
            "name": AGENT_NAME,
            "description": f"AgentRuntime KG+RAG 学习助手(参考苏瑶3号)。学科:{SUBJECT},难度:{LEVEL}。",
            "workflow_config": temp_wf,
        }
        r = requests.post(f"{BASE}/api/v1/agents", headers=h, json=body, timeout=60)
        if r.status_code != 201:
            print(f"创建失败: {r.status_code} {r.text[:500]}", file=sys.stderr)
            return 1
        agent_id = r.json()["id"]
        print(f"Agent 创建成功: id={agent_id} name={AGENT_NAME}")

    # Rebuild the workflow with the real agent_id so memory_scope_id isolates
    # this agent's memory from other agents'.
    wf = _build_workflow(agent_id=agent_id)

    description = (
        f"智能学习助手1号 — AgentRuntime KG+RAG 学习助手(参考苏瑶3号架构)。\n"
        f"学科:{SUBJECT},难度:{LEVEL}。\n"
        f"工作流:开始 → agent(ReAct) → 结束。\n"
        f"核心能力:\n"
        f"- 知识图谱:实体抽取 + 关系构建 + 混合检索\n"
        f"- 向量记忆:语义检索历史对话\n"
        f"- 长期记忆:跨会话用户画像 + 学习进度\n"
        f"- 自主学习:工具模式学习 + 能力自检\n"
        f"配置:{PROVIDER}/{MODEL} temperature={TEMPERATURE} max_iterations={MAX_ITERATIONS}"
    )

    up = requests.put(
        f"{BASE}/api/v1/agents/{agent_id}",
        headers=h,
        json={
            "description": description,
            "workflow_config": wf,
        },
        timeout=120,
    )
    if up.status_code != 200:
        print(f"更新失败: {up.status_code} {up.text[:500]}", file=sys.stderr)
        return 1

    # Human-readable summary of the final configuration.
    print(f"✅ Agent「{AGENT_NAME}」配置完成")
    print(f"   ID: {agent_id}")
    print(f"   类型: agent 节点 (AgentRuntime ReAct)")
    print(f"   学科: {SUBJECT}")
    print(f"   级别: {LEVEL}")
    print(f"   模型: {PROVIDER}/{MODEL} (temperature={TEMPERATURE})")
    print(f"   最大步数: {MAX_ITERATIONS}")
    print(f"   工具 ({len(LEARNING_TOOLS)}): {', '.join(LEARNING_TOOLS)}")
    print(f"   记忆: 向量(Top-8) + 长期 + 自主学习 已启用")
    print(f"   知识图谱: 混合检索 + 实体抽取 + 关系构建 已启用")
    print(f"   自检: self_review 已启用")
    print()
    print("与「智能学习助手(KG+RAG)」的区别:")
    print("  - 使用 agent 节点(AgentRuntime ReAct 循环)代替 llm 节点")
    print("  - 参考苏瑶3号架构,完整记忆配置")
    print("  - 独立 memory_scope_id,不与其他 Agent 串记忆")
    print("  - 更高的 temperature (0.85) 和自主学习能力")
    print()
    # Machine-readable result line for calling scripts.
    print(json.dumps({"id": agent_id, "name": AGENT_NAME}, ensure_ascii=False))
    return 0
|
|||
|
|
|
|||
|
|
|
|||
|
|
if __name__ == "__main__":
    # sys.exit raises SystemExit with main()'s return code, same as the
    # explicit raise — the process exit status is main()'s int result.
    sys.exit(main())
|