feat: Agent 运行时、对话 API、作业助手与引擎修复及前端执行超时
- agent_runtime 模块与 agent_chat API,前端 AgentChat 视图与路由对接 - workflow_engine: code 节点命名空间与 json 引用修复 - llm_service: 工具调用 extra_body(如 DeepSeek) - create_homework_manager_agent / _3 脚本与测试脚本扩展 - frontend: WORKFLOW_EXECUTION_HTTP_TIMEOUT_MS、AgentChatPreview/MainLayout 等 - 文档:架构说明与自主 Agent 改造完成情况 Made-with: Cursor
This commit is contained in:
32
backend/app/agent_runtime/__init__.py
Normal file
32
backend/app/agent_runtime/__init__.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""
|
||||
Agent Runtime — 自主 AI Agent 核心运行时。
|
||||
|
||||
提供 ReAct 循环驱动的自主 Agent,支持:
|
||||
- 工具调用(复用已有 ToolRegistry)
|
||||
- 分层记忆(工作记忆 + 长期记忆)
|
||||
- 多模型(OpenAI / DeepSeek)
|
||||
- 可嵌入工作流节点或独立运行
|
||||
"""
|
||||
from app.agent_runtime.core import AgentRuntime
|
||||
from app.agent_runtime.schemas import (
|
||||
AgentConfig,
|
||||
AgentResult,
|
||||
AgentLLMConfig,
|
||||
AgentToolConfig,
|
||||
AgentMemoryConfig,
|
||||
)
|
||||
from app.agent_runtime.context import AgentContext
|
||||
from app.agent_runtime.memory import AgentMemory
|
||||
from app.agent_runtime.tool_manager import AgentToolManager
|
||||
|
||||
__all__ = [
|
||||
"AgentRuntime",
|
||||
"AgentConfig",
|
||||
"AgentResult",
|
||||
"AgentLLMConfig",
|
||||
"AgentToolConfig",
|
||||
"AgentMemoryConfig",
|
||||
"AgentContext",
|
||||
"AgentMemory",
|
||||
"AgentToolManager",
|
||||
]
|
||||
87
backend/app/agent_runtime/context.py
Normal file
87
backend/app/agent_runtime/context.py
Normal file
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
Agent 会话上下文管理:维护消息历史、状态追踪。
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import uuid
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
|
||||
class AgentContext:
    """Per-session conversation state for an agent.

    Tracks:
    - message history (OpenAI chat format) via :attr:`messages`
    - session metadata (``session_id``, ``user_id``)
    - execution bookkeeping (``iteration`` counter, tool-call tally)
    """

    def __init__(
        self,
        system_prompt: str = "你是一个有用的AI助手。",
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ):
        # A fresh session id is generated when the caller supplies none.
        self.session_id = session_id if session_id else str(uuid.uuid4())
        self.user_id = user_id
        self._messages: List[Dict[str, Any]] = []
        self._system_prompt = system_prompt
        # Execution bookkeeping, reset per run by the runtime.
        self.iteration = 0
        self.tool_calls_made = 0

    @property
    def messages(self) -> List[Dict[str, Any]]:
        """Full message list with the system prompt guaranteed to come first."""
        if not self._system_prompt:
            return self._messages
        first = self._messages[0] if self._messages else None
        if first is not None and first.get("role") == "system":
            return self._messages
        # Prepend the system prompt without mutating the stored history.
        return [{"role": "system", "content": self._system_prompt}, *self._messages]

    def add_user_message(self, content: str) -> None:
        """Append a user turn."""
        self._messages.append({"role": "user", "content": content})

    def add_assistant_message(
        self,
        content: str,
        tool_calls: Optional[List[Dict[str, Any]]] = None,
        reasoning_content: Optional[str] = None,
    ) -> None:
        """Append an assistant turn, optionally carrying tool calls / reasoning."""
        entry: Dict[str, Any] = {"role": "assistant", "content": content or ""}
        if tool_calls:
            entry["tool_calls"] = tool_calls
        if reasoning_content:
            entry["reasoning_content"] = reasoning_content
        self._messages.append(entry)

    def add_tool_result(
        self, tool_call_id: str, tool_name: str, result: str
    ) -> None:
        """Append the result of one tool invocation."""
        self._messages.append({
            "role": "tool",
            "tool_call_id": tool_call_id,
            "content": result,
            "name": tool_name,
        })

    def set_system_prompt(self, prompt: str) -> None:
        """Replace the system prompt; a no-op once any message exists."""
        if not self._messages:
            self._system_prompt = prompt

    def reset(self) -> None:
        """Drop history and counters, keeping system prompt and session_id."""
        self._messages = []
        self.iteration = 0
        self.tool_calls_made = 0
|
||||
330
backend/app/agent_runtime/core.py
Normal file
330
backend/app/agent_runtime/core.py
Normal file
@@ -0,0 +1,330 @@
|
||||
"""
|
||||
Agent Runtime 核心 —— 自主 ReAct 循环。
|
||||
|
||||
流程:
|
||||
1. 接收用户输入 → 追加到消息列表
|
||||
2. 调用 LLM(携带 tools schema)
|
||||
3. 如果 LLM 返回工具调用 → 执行工具 → 结果追加到消息列表 → 回到 2
|
||||
4. 如果 LLM 返回文本 → 作为最终回答返回
|
||||
5. 超过 max_iterations → 强制终止
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
from app.agent_runtime.schemas import (
|
||||
AgentConfig,
|
||||
AgentResult,
|
||||
)
|
||||
from app.agent_runtime.context import AgentContext
|
||||
from app.agent_runtime.memory import AgentMemory
|
||||
from app.agent_runtime.tool_manager import AgentToolManager
|
||||
|
||||
logger = logging.getLogger(__name__)

# Substrings identifying transient API failures that are worth retrying.
_RETRYABLE_ERRORS = (
    "timed out",
    "timeout",
    "connection error",
    "temporarily unavailable",
    "server disconnected",
    "rate limit",
    "too many requests",
    "internal server error",
    "service unavailable",
)


class AgentRuntime:
    """Autonomous agent runtime driving a ReAct loop.

    Usage:
        runtime = AgentRuntime(config)
        result = await runtime.run("帮我写个Python脚本")
    """

    def __init__(
        self,
        config: Optional[AgentConfig] = None,
        context: Optional[AgentContext] = None,
        memory: Optional[AgentMemory] = None,
        tool_manager: Optional[AgentToolManager] = None,
        execution_logger: Optional[Any] = None,
        on_tool_executed: Optional[Callable[[str], Any]] = None,
    ):
        """All collaborators are optional; defaults are derived from ``config``.

        Args:
            config: Agent configuration (defaults to ``AgentConfig()``).
            context: Conversation context; built from config when omitted.
            memory: Long-term memory manager; built from config when omitted.
            tool_manager: Tool registry wrapper; built from config when omitted.
            execution_logger: Optional workflow logger exposing ``.info(msg, data=...)``.
            on_tool_executed: Optional async callback invoked with each executed tool name.
        """
        self.config = config or AgentConfig()
        self.context = context or AgentContext(
            system_prompt=self.config.system_prompt,
            user_id=self.config.user_id,
        )
        self.memory = memory or AgentMemory(
            scope_id=self.config.user_id or self.config.name,
            max_history=self.config.memory.max_history_messages,
            persist=self.config.memory.persist_to_db,
        )
        self.tool_manager = tool_manager or AgentToolManager(
            include_tools=self.config.tools.include_tools,
            exclude_tools=self.config.tools.exclude_tools,
        )
        self.execution_logger = execution_logger
        self.on_tool_executed = on_tool_executed
        # Long-term memory is injected into the system prompt once per runtime.
        self._memory_context_loaded = False

    async def run(self, user_input: str) -> AgentResult:
        """Run one user turn through the ReAct loop.

        Flow: load memory -> append user message -> loop (LLM call, tool
        execution) -> persist memory -> return an ``AgentResult``.
        """
        max_iter = max(1, self.config.llm.max_iterations)
        self.context.iteration = 0
        self.context.tool_calls_made = 0

        # 1. On first run, fold long-term memory into the system prompt.
        if not self._memory_context_loaded:
            await self._inject_memory_context()
            self._memory_context_loaded = True

        # 2. Record the user input.
        self.context.add_user_message(user_input)

        # 3. ReAct loop.
        llm = _LLMClient(self.config.llm)
        tool_schemas = self.tool_manager.get_tool_schemas()
        has_tools = self.tool_manager.has_tools()

        while self.context.iteration < max_iter:
            self.context.iteration += 1

            # Trim overly long history before each call.
            messages = self.memory.trim_messages(self.context.messages)

            try:
                response = await llm.chat(
                    messages=messages,
                    # FIX: the previous conditional here evaluated to the same
                    # value on both branches; tools are passed whenever any exist.
                    tools=tool_schemas if has_tools else None,
                    iteration=self.context.iteration,
                )
            except Exception as e:
                err_str = str(e)
                logger.error("LLM 调用失败 (iteration=%s): %s", self.context.iteration, err_str)
                # Retry transient failures while iterations remain.
                if self.context.iteration < max_iter and self._is_retryable(err_str):
                    continue
                return AgentResult(
                    success=False,
                    content=f"LLM 调用失败: {err_str}",
                    iterations_used=self.context.iteration,
                    tool_calls_made=self.context.tool_calls_made,
                    error=err_str,
                )

            # Parse the response.
            tool_calls = self._extract_tool_calls(response)
            content = self._extract_content(response)

            if not tool_calls:
                # Plain text answer -> final result.
                self.context.add_assistant_message(content)
                final_text = content or "(模型未返回有效内容)"
                # Persist this exchange into long-term memory.
                await self.memory.save_context(user_input, final_text)
                return AgentResult(
                    success=True,
                    content=final_text,
                    iterations_used=self.context.iteration,
                    tool_calls_made=self.context.tool_calls_made,
                )

            # Tool calls requested -> record the assistant turn first
            # (including tool_calls and any reasoning_content).
            reasoning = getattr(response, "reasoning_content", None) or (
                response.get("reasoning_content") if isinstance(response, dict) else None
            )
            self.context.add_assistant_message(content or "", tool_calls, reasoning)
            if self.execution_logger:
                self.execution_logger.info(
                    f"Agent 调用 {len(tool_calls)} 个工具",
                    data={"tool_calls": [tc["function"]["name"] for tc in tool_calls],
                          "iteration": self.context.iteration},
                )

            # Execute each requested tool in order.
            for tc in tool_calls:
                tfn = tc.get("function", {})
                tname = tfn.get("name", "unknown")
                tcid = tc.get("id", f"call_{self.context.iteration}_{self.context.tool_calls_made}")

                try:
                    targs = json.loads(tfn.get("arguments", "{}"))
                except (json.JSONDecodeError, TypeError):
                    # Malformed arguments degrade to an empty dict rather than abort.
                    targs = {}

                logger.info("Agent 执行工具 [%s]: %s", tname, targs)
                result = await self.tool_manager.execute(tname, targs)

                self.context.add_tool_result(tcid, tname, result)
                self.context.tool_calls_made += 1

                if self.on_tool_executed:
                    try:
                        await self.on_tool_executed(tname)
                    except Exception:
                        # Best-effort notification; never fail the run for it.
                        pass

                if self.execution_logger:
                    preview = result[:300] + "..." if len(result) > 300 else result
                    self.execution_logger.info(
                        f"工具 {tname} 执行完成",
                        data={"tool_name": tname, "result_preview": preview},
                    )

        # Iteration budget exhausted: fall back to the last assistant text.
        last_content = ""
        for m in reversed(self.context.messages):
            if m.get("role") == "assistant" and m.get("content"):
                last_content = m["content"]
                break

        logger.warning("Agent 达到最大迭代次数 (%s)", max_iter)
        await self.memory.save_context(user_input, last_content or "(已达最大迭代次数)")
        return AgentResult(
            success=True,
            content=last_content or "已达最大迭代次数,但模型未返回最终回答。",
            truncated=True,
            iterations_used=self.context.iteration,
            tool_calls_made=self.context.tool_calls_made,
        )

    async def _inject_memory_context(self) -> None:
        """Load long-term memory and append it to the system prompt."""
        mem_text = await self.memory.initialize()
        if mem_text:
            enriched = (
                self.config.system_prompt.rstrip("\n")
                + "\n\n"
                + mem_text
            )
            # Only effective before any message has been recorded.
            self.context.set_system_prompt(enriched)
            logger.info("Agent 已注入长期记忆上下文")

    @staticmethod
    def _extract_tool_calls(response: Any) -> List[Dict[str, Any]]:
        """Extract tool calls from an LLM response (SDK message object or dict)."""
        if response is None:
            return []
        # OpenAI SDK message object.
        if hasattr(response, "tool_calls") and response.tool_calls:
            result = []
            for tc in response.tool_calls:
                result.append({
                    "id": tc.id,
                    "type": tc.type,
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                    },
                })
            return result
        # Plain dict.
        if isinstance(response, dict):
            tc_list = response.get("tool_calls") or []
            if tc_list:
                return tc_list
            # Some models embed DSML-style invocations inside the text content.
            content = response.get("content") or ""
            if "invoke" in content or "function_call" in content:
                from app.services.llm_service import _parse_dsml_tool_invocations
                dsml = _parse_dsml_tool_invocations(content)
                if dsml:
                    return [
                        {
                            "id": f"dsml-{i}",
                            "type": "function",
                            "function": {
                                "name": inv["name"],
                                "arguments": json.dumps(inv["arguments"], ensure_ascii=False),
                            },
                        }
                        for i, inv in enumerate(dsml)
                    ]
        return []

    @staticmethod
    def _extract_content(response: Any) -> str:
        """Extract the text content from an LLM response."""
        if response is None:
            return ""
        if hasattr(response, "content"):
            return response.content or ""
        if isinstance(response, dict):
            return response.get("content") or ""
        return str(response)

    @staticmethod
    def _is_retryable(err_str: str) -> bool:
        """True when the error message matches a known transient failure."""
        err_lower = err_str.lower()
        return any(kw in err_lower for kw in _RETRYABLE_ERRORS)
|
||||
|
||||
|
||||
class _LLMClient:
    """Thin LLM client wrapper; reuses the shared LLMService singleton."""

    def __init__(self, config: Any):
        from app.services.llm_service import llm_service
        self._service = llm_service
        self._config = config

    async def chat(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        iteration: int = 1,
    ) -> Any:
        """Issue one chat-completion request and return the response message.

        The surrounding AgentRuntime owns the ReAct loop, so every round
        (first and later) goes through a plain chat call here — running an
        inner tool-calling loop as well would conflict with the outer one.
        """
        # Talk to the OpenAI/DeepSeek-compatible endpoint directly.
        from openai import AsyncOpenAI
        from app.core.config import settings

        # Credential resolution order: explicit config, then settings (.env).
        api_key = self._config.api_key or settings.OPENAI_API_KEY or ""
        base_url = self._config.base_url or settings.OPENAI_BASE_URL or ""

        if not api_key or api_key == "your-openai-api-key":
            # OpenAI key missing or left as placeholder -> fall back to DeepSeek.
            api_key = self._config.api_key or settings.DEEPSEEK_API_KEY or ""
            base_url = self._config.base_url or settings.DEEPSEEK_BASE_URL or "https://api.deepseek.com"

        if not api_key:
            raise ValueError("未配置 API Key")

        client = AsyncOpenAI(api_key=api_key, base_url=base_url)

        request: Dict[str, Any] = {
            "model": self._config.model,
            "messages": messages,
            "temperature": self._config.temperature,
            "timeout": self._config.request_timeout,
        }
        if self._config.max_tokens:
            request["max_tokens"] = self._config.max_tokens
        if self._config.extra_body:
            # Provider-specific extras (e.g. DeepSeek options).
            request["extra_body"] = self._config.extra_body
        if tools:
            request["tools"] = tools
            request["tool_choice"] = "auto"

        completion = await client.chat.completions.create(**request)
        return completion.choices[0].message
|
||||
135
backend/app/agent_runtime/memory.py
Normal file
135
backend/app/agent_runtime/memory.py
Normal file
@@ -0,0 +1,135 @@
|
||||
"""
|
||||
Agent 记忆管理:包装已有 persistent_memory_service,提供会话级和长期记忆。
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.database import SessionLocal
|
||||
from app.services.persistent_memory_service import (
|
||||
load_persistent_memory,
|
||||
save_persistent_memory,
|
||||
persist_enabled,
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentMemory:
    """Layered memory manager.

    - Working memory: the current session's message list (owned by AgentRuntime).
    - Long-term memory: user profile / key facts loaded from and saved to MySQL.
    - Context control: trims over-long histories before each LLM call.
    """

    def __init__(
        self,
        scope_kind: str = "agent",
        scope_id: Optional[str] = None,
        session_key: Optional[str] = None,
        persist: bool = True,
        max_history: int = 20,
    ):
        """
        Args:
            scope_kind: Memory namespace kind (e.g. "agent").
            scope_id: Owner identifier; falls back to "default".
            session_key: Session identifier; falls back to "default_session".
            persist: Whether to persist; additionally gated by persist_enabled().
            max_history: Maximum number of messages handed to the LLM.
        """
        self.scope_kind = scope_kind
        self.scope_id = scope_id or "default"
        self.session_key = session_key or "default_session"
        self.persist = persist and persist_enabled()
        self.max_history = max_history
        # Long-term context loaded by initialize().
        self._long_term_context: Dict[str, Any] = {}

    async def initialize(self) -> str:
        """Load long-term memory from the DB and build a context text block.

        Returns:
            Text to inject into the system prompt, or "" when nothing loaded.
        """
        if not self.persist or not self.scope_id:
            return ""

        db: Optional[Session] = None
        try:
            db = SessionLocal()
            payload = load_persistent_memory(
                db, self.scope_kind, self.scope_id, self.session_key
            )
            if payload and isinstance(payload, dict):
                self._long_term_context = payload
                # Assemble the text block injected into the system prompt.
                parts = []
                profile = payload.get("user_profile")
                if profile and isinstance(profile, dict):
                    profile_text = json.dumps(profile, ensure_ascii=False)
                    parts.append(f"## 用户画像\n{profile_text}")

                context = payload.get("context")
                if context and isinstance(context, dict):
                    ctx_text = json.dumps(context, ensure_ascii=False)
                    parts.append(f"## 上下文\n{ctx_text}")

                history = payload.get("conversation_history")
                if history and isinstance(history, list) and len(history) > 0:
                    summary = self._summarize_history(history)
                    parts.append(f"## 历史对话摘要\n{summary}")

                if parts:
                    return "\n\n".join(parts)
        except Exception as e:
            # Memory is an enhancement; never fail the run over it.
            logger.warning("加载长期记忆失败: %s", e)
        finally:
            if db:
                db.close()
        return ""

    async def save_context(
        self, user_message: str, assistant_reply: str
    ) -> None:
        """Persist one (truncated) exchange into long-term memory."""
        if not self.persist or not self.scope_id:
            return

        # Record the latest exchange, capped at 500 characters each.
        ctx = self._long_term_context.get("context", {})
        ctx["last_user_message"] = user_message[:500]
        ctx["last_assistant_reply"] = assistant_reply[:500]
        self._long_term_context["context"] = ctx

        db: Optional[Session] = None
        try:
            db = SessionLocal()
            save_persistent_memory(
                db, self.scope_kind, self.scope_id,
                self.session_key, self._long_term_context,
            )
        except Exception as e:
            logger.warning("保存长期记忆失败: %s", e)
        finally:
            if db:
                db.close()

    def trim_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Trim to the most recent ``max_history`` messages, always keeping
        system messages.

        FIX: the previous slice ``other_msgs[-(max_history - len(system_msgs)):]``
        returned the ENTIRE list when the remaining budget was zero
        (``[-0:]`` == ``[0:]``) and misbehaved when it went negative; the budget
        is now clamped to >= 0 and a zero budget keeps no non-system messages.
        """
        if len(messages) <= self.max_history:
            return messages

        system_msgs = [m for m in messages if m.get("role") == "system"]
        other_msgs = [m for m in messages if m.get("role") != "system"]

        keep = max(0, self.max_history - len(system_msgs))
        trimmed = other_msgs[-keep:] if keep > 0 else []
        return system_msgs + trimmed

    @staticmethod
    def _summarize_history(history: List[Dict[str, Any]]) -> str:
        """Summarize history by counting user turns (no LLM compression)."""
        turns = 0
        for m in history:
            if m.get("role") == "user":
                turns += 1
        return f"共 {turns} 轮历史对话(详情已存入长期记忆)"
|
||||
64
backend/app/agent_runtime/schemas.py
Normal file
64
backend/app/agent_runtime/schemas.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
Agent Runtime 配置与数据结构 Schema
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class AgentToolConfig(BaseModel):
    """Tool access configuration for an agent."""
    # An empty include list means every registered tool is allowed.
    include_tools: List[str] = Field(default_factory=list, description="允许的工具名称白名单")
    exclude_tools: List[str] = Field(default_factory=list, description="排除的工具名称黑名单")
|
||||
|
||||
|
||||
class AgentMemoryConfig(BaseModel):
    """Memory configuration for an agent."""
    enabled: bool = True
    max_history_messages: int = 20  # max number of messages injected into the LLM context
    session_key: Optional[str] = None  # session identifier; auto-generated when None
    persist_to_db: bool = True  # whether to write long-term memory to MySQL
|
||||
|
||||
|
||||
class AgentLLMConfig(BaseModel):
    """Model configuration for an agent."""
    provider: str = "openai"  # "openai" or "deepseek"
    model: str = "gpt-4o-mini"
    temperature: float = 0.7
    max_tokens: Optional[int] = None
    api_key: Optional[str] = None  # falls back to app settings when None
    base_url: Optional[str] = None  # falls back to app settings when None
    max_iterations: int = 10  # maximum number of ReAct loop steps
    request_timeout: float = 120.0  # per-request timeout in seconds
    extra_body: Optional[Dict[str, Any]] = None  # provider-specific request extras
|
||||
|
||||
|
||||
class AgentConfig(BaseModel):
    """Complete agent configuration: identity, prompt, LLM, tools, memory."""
    name: str = "default_agent"
    system_prompt: str = "你是一个有用的AI助手。请使用可用工具来帮助用户完成任务。"
    llm: AgentLLMConfig = Field(default_factory=AgentLLMConfig)
    tools: AgentToolConfig = Field(default_factory=AgentToolConfig)
    memory: AgentMemoryConfig = Field(default_factory=AgentMemoryConfig)
    user_id: Optional[str] = None  # owner; also used to scope long-term memory
|
||||
|
||||
|
||||
class AgentMessage(BaseModel):
    """One chat message in OpenAI format."""
    role: str  # "user" / "assistant" / "tool"
    content: str
    tool_calls: Optional[List[Dict[str, Any]]] = None  # assistant tool requests
    tool_call_id: Optional[str] = None  # set on tool-result messages
    name: Optional[str] = None  # tool name on tool-result messages
|
||||
|
||||
|
||||
class AgentResult(BaseModel):
    """Outcome of one agent run."""
    success: bool = True
    content: str = ""
    truncated: bool = False  # True when the iteration budget was exhausted
    iterations_used: int = 0
    tool_calls_made: int = 0
    error: Optional[str] = None  # error message when success is False
|
||||
94
backend/app/agent_runtime/tool_manager.py
Normal file
94
backend/app/agent_runtime/tool_manager.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""
|
||||
Agent 工具管理器:包装已有 ToolRegistry,提供 Agent 需要的工具格式转换和执行。
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
from app.services.tool_registry import tool_registry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentToolManager:
    """Adapter between the global ToolRegistry and the agent runtime.

    Responsibilities:
    - expose registry schemas in OpenAI function-calling format
    - apply the agent's allow/deny lists
    - execute a named tool and stringify its result
    """

    def __init__(self, include_tools: Optional[List[str]] = None,
                 exclude_tools: Optional[List[str]] = None):
        self._include_tools: set = set(include_tools or [])
        self._exclude_tools: set = set(exclude_tools or [])

    def get_tool_schemas(self) -> List[Dict[str, Any]]:
        """Return the filtered tool definitions (OpenAI function-calling format)."""
        schemas = tool_registry.get_all_tool_schemas()
        if not (self._include_tools or self._exclude_tools):
            return schemas

        def _allowed(tool_name: Optional[str]) -> bool:
            # Unnamed schemas are dropped; the allow list (when set) wins first.
            if not tool_name:
                return False
            if self._include_tools and tool_name not in self._include_tools:
                return False
            return tool_name not in self._exclude_tools

        return [s for s in schemas if _allowed(self._extract_tool_name(s))]

    def has_tools(self) -> bool:
        """True when at least one tool survives filtering."""
        return bool(self.get_tool_schemas())

    def tool_names(self) -> List[str]:
        """Names of the tools currently available to the agent."""
        return [self._extract_tool_name(s) or "?" for s in self.get_tool_schemas()]

    async def execute(self, name: str, args: Dict[str, Any]) -> str:
        """Run one tool call.

        Args:
            name: Registered tool name.
            args: Keyword arguments for the tool.

        Returns:
            The tool result as a string (dicts/lists are JSON-encoded);
            failures come back as a JSON object with an "error" key.
        """
        func: Optional[Callable] = tool_registry.get_tool_function(name)
        if not func:
            err = f"工具 '{name}' 不存在"
            logger.error(err)
            return json.dumps({"error": err}, ensure_ascii=False)

        logger.info("Agent 执行工具: %s, 参数: %s", name, args)
        try:
            import asyncio
            # Tools may be sync or async; only coroutine functions are awaited.
            if asyncio.iscoroutinefunction(func):
                outcome = await func(**args)
            else:
                outcome = func(**args)

            if isinstance(outcome, (dict, list)):
                return json.dumps(outcome, ensure_ascii=False)
            return str(outcome)
        except Exception as e:
            err_msg = f"工具 '{name}' 执行失败: {e}"
            logger.error(err_msg, exc_info=True)
            return json.dumps({"error": err_msg}, ensure_ascii=False)

    @staticmethod
    def _extract_tool_name(schema: Dict[str, Any]) -> Optional[str]:
        """Pull the tool name out of either nested or flat schema shapes."""
        fn = schema.get("function") or schema
        return fn.get("name") if isinstance(fn, dict) else None
|
||||
115
backend/app/agent_runtime/workflow_integration.py
Normal file
115
backend/app/agent_runtime/workflow_integration.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""
|
||||
Agent Runtime ⇄ WorkflowEngine 桥接。
|
||||
|
||||
让 workflow_engine.execute_node() 通过寥寥几行调用 Agent Runtime。
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from app.agent_runtime.core import AgentRuntime
|
||||
from app.agent_runtime.schemas import (
|
||||
AgentConfig,
|
||||
AgentLLMConfig,
|
||||
AgentToolConfig,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def run_agent_node(
    node_data: Dict[str, Any],
    input_data: Dict[str, Any],
    execution_logger: Optional[Any] = None,
    user_id: Optional[str] = None,
    on_tool_executed: Optional[Any] = None,
) -> Dict[str, Any]:
    """Execute an agent node inside a workflow run.

    Recognized ``node_data`` keys:
        system_prompt    agent persona/instructions; ``{variable}`` placeholders
                         are filled from ``input_data`` when possible
        tools            optional tool allow-list (default: all tools)
        exclude_tools    optional tool deny-list
        model            model name
        provider         provider ("openai"/"deepseek")
        temperature      sampling temperature
        max_iterations   ReAct step budget
        memory           enable long-term memory
        api_key/base_url optional inline credentials

    The user input is taken from ``input_data`` ("query", then "input",
    then "text").

    Returns:
        A workflow-engine-compatible dict with "output", "status" and, on
        success, "agent_meta" (iterations / tool_calls / truncated).
    """
    # Resolve the user input from the accepted keys, coercing to str.
    raw_query = (
        input_data.get("query")
        or input_data.get("input")
        or input_data.get("text", "")
    )
    query = raw_query if isinstance(raw_query, str) else (str(raw_query) if raw_query else "")

    if not query:
        return {"output": "错误:Agent 节点未收到用户输入", "status": "error"}

    # Fill {variable} placeholders in the prompt; fall back to the raw
    # prompt when the template references missing keys or has bad syntax.
    prompt_template = node_data.get("system_prompt", "你是一个有用的AI助手。")
    try:
        system_prompt = prompt_template.format(**input_data)
    except (KeyError, ValueError):
        system_prompt = prompt_template

    # Assemble the LLM configuration, honoring inline overrides.
    llm_config = AgentLLMConfig(
        provider=node_data.get("provider", "openai"),
        model=node_data.get("model", "gpt-4o-mini"),
        temperature=float(node_data.get("temperature", 0.7)),
        max_iterations=int(node_data.get("max_iterations", 10)),
    )
    inline_key = node_data.get("api_key")
    if inline_key:
        llm_config.api_key = inline_key
    inline_url = node_data.get("base_url")
    if inline_url:
        llm_config.base_url = inline_url

    memory_enabled = node_data.get("memory", True)
    agent_config = AgentConfig(
        name=node_data.get("label", "agent_node"),
        system_prompt=system_prompt,
        llm=llm_config,
        tools=AgentToolConfig(
            include_tools=node_data.get("tools", []),
            exclude_tools=node_data.get("exclude_tools", []),
        ),
        memory={
            "enabled": memory_enabled,
            "persist_to_db": memory_enabled,
        },
        user_id=user_id,
    )

    # Run the agent and adapt its result to the engine's output contract.
    runtime = AgentRuntime(
        config=agent_config,
        execution_logger=execution_logger,
        on_tool_executed=on_tool_executed,
    )
    result = await runtime.run(query)

    if not result.success:
        return {
            "output": result.content,
            "status": "error",
            "error": result.error,
        }
    return {
        "output": result.content,
        "status": "success",
        "agent_meta": {
            "iterations": result.iterations_used,
            "tool_calls": result.tool_calls_made,
            "truncated": result.truncated,
        },
    }
|
||||
137
backend/app/api/agent_chat.py
Normal file
137
backend/app/api/agent_chat.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
Agent 独立聊天 API — 不依赖工作流 DAG,直接与 Agent Runtime 对话。
|
||||
|
||||
POST /api/v1/agent-chat/bare
|
||||
{"message": "你好,帮我..."}
|
||||
→ {"content": "...", "iterations": 3, "tool_calls": 5}
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.core.database import get_db
|
||||
from sqlalchemy.orm import Session
|
||||
from app.api.auth import get_current_user
|
||||
from app.models.user import User
|
||||
from app.models.agent import Agent
|
||||
from app.agent_runtime import (
|
||||
AgentRuntime,
|
||||
AgentConfig,
|
||||
AgentLLMConfig,
|
||||
AgentToolConfig,
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api/v1/agent-chat", tags=["agent-chat"])
|
||||
|
||||
|
||||
class ChatRequest(BaseModel):
    """Request body for the agent chat endpoints."""
    message: str
    session_id: Optional[str] = None  # accepted but not used to resume sessions yet
    model: Optional[str] = None  # override the configured model
    temperature: Optional[float] = None  # override the configured temperature
    max_iterations: Optional[int] = None  # override the ReAct step budget
|
||||
|
||||
|
||||
class ChatResponse(BaseModel):
    """Response body for the agent chat endpoints."""
    content: str
    iterations_used: int
    tool_calls_made: int
    truncated: bool  # True when the agent hit its iteration budget
    session_id: str
    agent_id: Optional[str] = None  # set only by the per-agent endpoint
|
||||
|
||||
|
||||
@router.post("/bare", response_model=ChatResponse)
async def chat_bare(
    req: ChatRequest,
    current_user: User = Depends(get_current_user),
):
    """Chat with a default, unconfigured agent (no Agent record required).

    Model selection prefers OpenAI when a real OPENAI_API_KEY is configured,
    otherwise falls back to the DeepSeek default model.
    """
    default_model = (
        "gpt-4o-mini" if settings.OPENAI_API_KEY and settings.OPENAI_API_KEY != "your-openai-api-key"
        else "deepseek-v4-flash"
    )
    config = AgentConfig(
        name="bare_agent",
        system_prompt="你是一个有用的AI助手。请使用可用工具来帮助用户完成任务。",
        llm=AgentLLMConfig(
            model=req.model or default_model,
            # FIX: "req.temperature or 0.7" treated a legitimate 0.0 as unset;
            # compare against None so explicit zero overrides are honored.
            temperature=req.temperature if req.temperature is not None else 0.7,
            max_iterations=req.max_iterations if req.max_iterations is not None else 10,
        ),
        user_id=current_user.id,
    )
    runtime = AgentRuntime(config=config)
    result = await runtime.run(req.message)

    return ChatResponse(
        content=result.content,
        iterations_used=result.iterations_used,
        tool_calls_made=result.tool_calls_made,
        truncated=result.truncated,
        session_id=runtime.context.session_id,
    )
|
||||
|
||||
|
||||
@router.post("/{agent_id}", response_model=ChatResponse)
async def chat_with_agent(
    agent_id: str,
    req: ChatRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Chat with a specific Agent; its workflow config seeds the runtime.

    Raises:
        HTTPException 404: the agent does not exist.
        HTTPException 403: the caller neither owns the agent nor is an admin.
    """
    agent = db.query(Agent).filter(Agent.id == agent_id).first()
    if not agent:
        raise HTTPException(status_code=404, detail="Agent 不存在")
    # Ownerless agents are public; owned agents require owner or admin.
    if agent.user_id and agent.user_id != current_user.id and current_user.role != "admin":
        raise HTTPException(status_code=403, detail="无权访问该 Agent")

    # Pull the agent/llm node settings out of the stored workflow config.
    wc = agent.workflow_config or {}
    nodes = wc.get("nodes", [])
    agent_node_cfg = _find_agent_node_config(nodes)

    config = AgentConfig(
        name=agent.name,
        system_prompt=agent_node_cfg.get("system_prompt") or agent.description or "你是一个有用的AI助手。",
        llm=AgentLLMConfig(
            provider=agent_node_cfg.get("provider", "openai"),
            model=req.model or agent_node_cfg.get("model", "gpt-4o-mini"),
            # FIX: "req.temperature or <default>" treated explicit 0/0.0 as
            # unset; compare against None so zero overrides are honored.
            temperature=req.temperature if req.temperature is not None
            else float(agent_node_cfg.get("temperature", 0.7)),
            max_iterations=req.max_iterations if req.max_iterations is not None
            else int(agent_node_cfg.get("max_iterations", 10)),
        ),
        tools=AgentToolConfig(
            include_tools=agent_node_cfg.get("tools", []),
            exclude_tools=agent_node_cfg.get("exclude_tools", []),
        ),
        user_id=current_user.id,
    )

    runtime = AgentRuntime(config=config)
    result = await runtime.run(req.message)

    return ChatResponse(
        content=result.content,
        iterations_used=result.iterations_used,
        tool_calls_made=result.tool_calls_made,
        truncated=result.truncated,
        session_id=runtime.context.session_id,
        agent_id=agent_id,
    )
|
||||
|
||||
|
||||
def _find_agent_node_config(nodes: list) -> Dict[str, Any]:
|
||||
"""从工作流节点列表中查找第一个 agent 类型或 llm 类型的节点配置。"""
|
||||
if not nodes:
|
||||
return {}
|
||||
for node in nodes:
|
||||
typ = node.get("type", "")
|
||||
if typ in ("agent", "llm", "template"):
|
||||
return node.get("data") or {}
|
||||
return {}
|
||||
@@ -201,7 +201,7 @@ async def startup_event():
|
||||
# 不抛出异常,允许应用继续启动
|
||||
|
||||
# 注册路由
|
||||
from app.api import auth, uploads, workflows, executions, websocket, execution_logs, data_sources, agents, platform_templates, model_configs, webhooks, template_market, batch_operations, collaboration, permissions, monitoring, alert_rules, node_test, node_templates, tools
|
||||
from app.api import auth, uploads, workflows, executions, websocket, execution_logs, data_sources, agents, platform_templates, model_configs, webhooks, template_market, batch_operations, collaboration, permissions, monitoring, alert_rules, node_test, node_templates, tools, agent_chat
|
||||
|
||||
app.include_router(auth.router)
|
||||
app.include_router(uploads.router)
|
||||
@@ -223,6 +223,7 @@ app.include_router(alert_rules.router)
|
||||
app.include_router(node_test.router)
|
||||
app.include_router(node_templates.router)
|
||||
app.include_router(tools.router)
|
||||
app.include_router(agent_chat.router)
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
@@ -50,6 +50,28 @@ def _is_retryable_llm_error(exc: Exception) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def _assistant_message_for_tool_history(message: Any, tool_calls_dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""
|
||||
构造写入多轮 messages 的 assistant 条目。
|
||||
DeepSeek V4 思考模式 + 工具调用:下一轮请求必须把本轮返回的 reasoning_content 原样带回,
|
||||
否则会 400 invalid_request_error。
|
||||
"""
|
||||
entry: Dict[str, Any] = {
|
||||
"role": "assistant",
|
||||
"content": message.content,
|
||||
}
|
||||
if tool_calls_dicts:
|
||||
entry["tool_calls"] = tool_calls_dicts
|
||||
rc = getattr(message, "reasoning_content", None)
|
||||
if rc is None:
|
||||
extra = getattr(message, "model_extra", None) or {}
|
||||
if isinstance(extra, dict):
|
||||
rc = extra.get("reasoning_content")
|
||||
if rc is not None:
|
||||
entry["reasoning_content"] = rc
|
||||
return entry
|
||||
|
||||
|
||||
def _extract_dsml_parameter_args(chunk: str) -> Dict[str, str]:
|
||||
"""
|
||||
DeepSeek 新版 DSML 常用「parameter」而非「invoke_arg」:
|
||||
@@ -635,6 +657,7 @@ class LLMService:
|
||||
tool_choice: Optional[str] = None,
|
||||
on_tool_executed: Optional[Callable[[str], Awaitable[None]]] = None,
|
||||
request_timeout: Optional[float] = None,
|
||||
extra_body: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
调用OpenAI API,支持工具调用
|
||||
@@ -685,6 +708,8 @@ class LLMService:
|
||||
"temperature": temperature,
|
||||
"max_tokens": max_tokens
|
||||
}
|
||||
if extra_body:
|
||||
create_kwargs["extra_body"] = extra_body
|
||||
|
||||
if iteration == 0:
|
||||
# 转换工具格式为OpenAI格式
|
||||
@@ -755,11 +780,7 @@ class LLMService:
|
||||
},
|
||||
})
|
||||
|
||||
messages.append({
|
||||
"role": "assistant",
|
||||
"content": message.content,
|
||||
"tool_calls": tool_calls_dicts,
|
||||
})
|
||||
messages.append(_assistant_message_for_tool_history(message, tool_calls_dicts))
|
||||
|
||||
if not tool_calls_dicts:
|
||||
final_content = message.content or ""
|
||||
@@ -862,6 +883,7 @@ class LLMService:
|
||||
tool_choice: Optional[str] = None,
|
||||
on_tool_executed: Optional[Callable[[str], Awaitable[None]]] = None,
|
||||
request_timeout: Optional[float] = None,
|
||||
extra_body: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
调用DeepSeek API,支持工具调用(DeepSeek兼容OpenAI API格式)
|
||||
@@ -880,6 +902,7 @@ class LLMService:
|
||||
tool_choice=tool_choice,
|
||||
on_tool_executed=on_tool_executed,
|
||||
request_timeout=request_timeout,
|
||||
extra_body=extra_body,
|
||||
)
|
||||
|
||||
async def call_llm_with_tools(
|
||||
|
||||
@@ -983,6 +983,64 @@ class WorkflowEngine:
|
||||
return ctx.get("assistant_display_name")
|
||||
return None
|
||||
|
||||
def _format_prior_conversation_for_llm(
|
||||
self, input_data: Dict[str, Any], original_prompt_template: str
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Agent 多轮对话:执行请求若携带 conversation_history,而提示词未使用
|
||||
{{memory.conversation_history}} 等占位符,则在此处拼进最终 prompt,避免模型「失忆」。
|
||||
"""
|
||||
t = original_prompt_template or ""
|
||||
if "memory.conversation_history" in t or re.search(
|
||||
r"\{\{[^}]*conversation_history[^}]*\}\}", t
|
||||
):
|
||||
return None
|
||||
|
||||
hist: Any = None
|
||||
if isinstance(input_data, dict):
|
||||
hist = input_data.get("conversation_history")
|
||||
if hist is None and isinstance(input_data.get("memory"), dict):
|
||||
hist = input_data["memory"].get("conversation_history")
|
||||
if hist is None and isinstance(input_data.get("right"), dict):
|
||||
r = input_data["right"]
|
||||
hist = r.get("conversation_history")
|
||||
if hist is None and isinstance(r.get("memory"), dict):
|
||||
hist = r["memory"].get("conversation_history")
|
||||
|
||||
if not hist or not isinstance(hist, list):
|
||||
return None
|
||||
|
||||
lines: List[str] = []
|
||||
max_turns = 24
|
||||
for msg in hist[-max_turns:]:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
role = msg.get("role", "")
|
||||
content = msg.get("content", "")
|
||||
if content is None:
|
||||
continue
|
||||
if not isinstance(content, str):
|
||||
content = str(content)
|
||||
content = content.strip()
|
||||
if not content:
|
||||
continue
|
||||
if role == "user":
|
||||
lines.append(f"用户:{content}")
|
||||
elif role in ("assistant", "agent"):
|
||||
lines.append(f"助手:{content}")
|
||||
else:
|
||||
lines.append(f"{role}:{content}")
|
||||
|
||||
if not lines:
|
||||
return None
|
||||
|
||||
body = "\n".join(lines)
|
||||
max_chars = 12000
|
||||
if len(body) > max_chars:
|
||||
body = body[-max_chars:] + "\n…(更早的对话已截断)"
|
||||
|
||||
return f"【本轮之前的对话】\n{body}"
|
||||
|
||||
def _resolve_vector_db_query_embedding(
|
||||
self, input_data: Any, query_vector_config: Any
|
||||
) -> Optional[List[Any]]:
|
||||
@@ -1605,6 +1663,8 @@ class WorkflowEngine:
|
||||
logger.info(f"[rjb] 使用JSON或字符串转换: user_query={user_query}")
|
||||
|
||||
logger.info(f"[rjb] 最终提取的user_query: {user_query}")
|
||||
|
||||
history_block = self._format_prior_conversation_for_llm(input_data, prompt)
|
||||
|
||||
# 如果prompt中没有占位符,或者仍有未填充的变量,将用户输入附加到prompt
|
||||
is_generic_instruction = False # 初始化变量
|
||||
@@ -1633,25 +1693,43 @@ class WorkflowEngine:
|
||||
|
||||
if is_generic_instruction:
|
||||
# 如果是通用指令,直接使用用户输入作为prompt
|
||||
formatted_prompt = str(user_query)
|
||||
if history_block:
|
||||
formatted_prompt = f"{history_block}\n\n{str(user_query)}"
|
||||
else:
|
||||
formatted_prompt = str(user_query)
|
||||
logger.info(f"[rjb] 检测到通用指令,直接使用用户输入作为prompt: {user_query[:50] if user_query else 'None'}")
|
||||
else:
|
||||
# 否则,将用户输入附加到prompt
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{user_query}"
|
||||
if history_block:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{history_block}\n\n{user_query}"
|
||||
else:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{user_query}"
|
||||
logger.info(f"[rjb] 非通用指令,将用户输入附加到prompt")
|
||||
else:
|
||||
# 如果没有提取到用户查询,附加整个input_data
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{json_module.dumps(input_data, ensure_ascii=False)}"
|
||||
tail = json_module.dumps(input_data, ensure_ascii=False)
|
||||
if history_block:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{history_block}\n\n{tail}"
|
||||
else:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{tail}"
|
||||
elif has_unfilled_variables or re.search(r'\{\{[^}]+\}\}', formatted_prompt):
|
||||
# 如果有占位符但未填充,先尝试清理所有未填充的模板变量
|
||||
# 使用正则表达式替换所有 {{...}} 格式的未填充变量
|
||||
formatted_prompt = re.sub(r'\{\{[^}]+\}\}', '', formatted_prompt)
|
||||
# 如果有占位符但未填充,附加用户需求说明
|
||||
if user_query:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n用户需求:{user_query}\n\n请根据用户需求来完成任务。"
|
||||
user_tail = f"用户需求:{user_query}\n\n请根据用户需求来完成任务。"
|
||||
if history_block:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{history_block}\n\n{user_tail}"
|
||||
else:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{user_tail}"
|
||||
else:
|
||||
# 如果没有用户查询,附加整个input_data
|
||||
formatted_prompt = f"{formatted_prompt}\n\n输入数据:{json_module.dumps(input_data, ensure_ascii=False)}\n\n请根据输入数据来完成任务。"
|
||||
data_tail = f"输入数据:{json_module.dumps(input_data, ensure_ascii=False)}\n\n请根据输入数据来完成任务。"
|
||||
if history_block:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{history_block}\n\n{data_tail}"
|
||||
else:
|
||||
formatted_prompt = f"{formatted_prompt}\n\n{data_tail}"
|
||||
|
||||
logger.info(f"[rjb] LLM节点prompt格式化: node_id={node_id}, original_prompt='{prompt[:50] if len(prompt) > 50 else prompt}', has_any_placeholder={has_any_placeholder}, user_query={user_query}, is_generic_instruction={is_generic_instruction}, final_prompt前200字符='{formatted_prompt[:200] if len(formatted_prompt) > 200 else formatted_prompt}'")
|
||||
prompt = formatted_prompt
|
||||
@@ -1720,6 +1798,9 @@ class WorkflowEngine:
|
||||
llm_extra_kw["api_key"] = api_key
|
||||
if base_url is not None:
|
||||
llm_extra_kw["base_url"] = base_url
|
||||
_xb = node_data.get("extra_body")
|
||||
if isinstance(_xb, dict) and _xb:
|
||||
llm_extra_kw["extra_body"] = _xb
|
||||
|
||||
# 记录实际发送给LLM的prompt
|
||||
logger.info(f"[rjb] 准备调用LLM: node_id={node_id}, provider={provider}, model={model}, prompt前200字符='{prompt[:200] if len(prompt) > 200 else prompt}'")
|
||||
@@ -1821,7 +1902,45 @@ class WorkflowEngine:
|
||||
'status': 'failed',
|
||||
'error': f'LLM调用失败: {str(e)}'
|
||||
}
|
||||
|
||||
|
||||
elif node_type == 'agent':
|
||||
# Agent 节点:自主 ReAct 循环,支持多步工具调用
|
||||
if self.logger:
|
||||
self.logger.info(
|
||||
"Agent 节点开始执行",
|
||||
data={"node_id": node_id, "input": input_data},
|
||||
)
|
||||
try:
|
||||
from app.agent_runtime.workflow_integration import run_agent_node
|
||||
|
||||
_agent_on_tool = None
|
||||
if hasattr(self, '_on_tool_executed_budget'):
|
||||
_agent_on_tool = self._on_tool_executed_budget
|
||||
|
||||
result = await run_agent_node(
|
||||
node_data=node.get("data", {}),
|
||||
input_data=input_data,
|
||||
execution_logger=self.logger,
|
||||
user_id=self.trusted_model_config_user_id,
|
||||
on_tool_executed=_agent_on_tool,
|
||||
)
|
||||
if self.logger:
|
||||
duration = int((time.time() - start_time) * 1000)
|
||||
self.logger.log_node_complete(
|
||||
node_id, node_type, result.get("output"), duration,
|
||||
)
|
||||
return result
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
duration = int((time.time() - start_time) * 1000)
|
||||
self.logger.log_node_error(node_id, node_type, e, duration)
|
||||
logger.error(f"Agent 节点执行失败: {e}", exc_info=True)
|
||||
return {
|
||||
"output": None,
|
||||
"status": "failed",
|
||||
"error": f"Agent 执行失败: {e}",
|
||||
}
|
||||
|
||||
elif node_type == 'condition':
|
||||
# 条件节点:判断分支(output 必须透传上游 dict,否则 sourceHandle true/false 下游只收到布尔值,丢失 reply/memory)
|
||||
condition = node.get('data', {}).get('condition', '')
|
||||
@@ -1892,6 +2011,9 @@ class WorkflowEngine:
|
||||
expanded_input.update(_bp)
|
||||
for _k in ('true', 'false', '_condition_result', '_condition_error'):
|
||||
expanded_input.pop(_k, None)
|
||||
# 展开 left:双入边 transform 的上游一路常挂在 sourceHandle=left(另一路为 LLM/code 的 right)
|
||||
if isinstance(expanded_input.get('left'), dict):
|
||||
expanded_input.update(expanded_input['left'])
|
||||
# 展开 right:merge / json-parse 后 reply、user_profile 常在 right 或嵌套 JSON 字符串中
|
||||
if isinstance(expanded_input.get('right'), dict):
|
||||
expanded_input.update(expanded_input['right'])
|
||||
@@ -2365,7 +2487,6 @@ class WorkflowEngine:
|
||||
|
||||
try:
|
||||
import os
|
||||
import json
|
||||
import base64
|
||||
from pathlib import Path
|
||||
|
||||
@@ -2806,7 +2927,6 @@ class WorkflowEngine:
|
||||
if queue_type == 'rabbitmq':
|
||||
# RabbitMQ实现
|
||||
import aio_pika
|
||||
import json
|
||||
|
||||
# 获取RabbitMQ配置
|
||||
host = replace_variables(node_data.get('host', 'localhost'), input_data)
|
||||
@@ -2885,7 +3005,6 @@ class WorkflowEngine:
|
||||
elif queue_type == 'kafka':
|
||||
# Kafka实现
|
||||
from kafka import KafkaProducer
|
||||
import json
|
||||
|
||||
# 获取Kafka配置
|
||||
bootstrap_servers = replace_variables(node_data.get('bootstrap_servers', 'localhost:9092'), input_data)
|
||||
@@ -3530,6 +3649,31 @@ class WorkflowEngine:
|
||||
if not isinstance(base_up, dict):
|
||||
base_up = {}
|
||||
memory['user_profile'] = {**base_up, **upd}
|
||||
|
||||
hb_upd = input_data.get('homework_board_update')
|
||||
if isinstance(hb_upd, str) and hb_upd.strip().startswith('{'):
|
||||
try:
|
||||
hb_upd = json_module.loads(hb_upd)
|
||||
except Exception:
|
||||
hb_upd = {}
|
||||
if not isinstance(hb_upd, dict):
|
||||
hb_upd = {}
|
||||
if hb_upd:
|
||||
ctx = memory.get('context')
|
||||
if not isinstance(ctx, dict):
|
||||
ctx = {}
|
||||
base_hb = ctx.get('homework_board')
|
||||
if not isinstance(base_hb, dict):
|
||||
base_hb = {}
|
||||
merged_hb = {**base_hb, **hb_upd}
|
||||
new_items = hb_upd.get('items')
|
||||
old_items = base_hb.get('items')
|
||||
if isinstance(new_items, list) and len(new_items) > 0:
|
||||
merged_hb['items'] = new_items
|
||||
elif isinstance(old_items, list):
|
||||
merged_hb['items'] = old_items
|
||||
ctx['homework_board'] = merged_hb
|
||||
memory['context'] = ctx
|
||||
|
||||
# 确保memory中有必要的字段
|
||||
if 'conversation_history' not in memory:
|
||||
@@ -4937,10 +5081,27 @@ class WorkflowEngine:
|
||||
try:
|
||||
if language.lower() == 'python':
|
||||
# 受限执行环境(禁止无 __builtins__,否则 isinstance 等不可用)
|
||||
local_vars = {'input_data': input_data, 'result': None}
|
||||
_code_globs = {'__builtins__': _CODE_NODE_SAFE_BUILTINS, 'hashlib': hashlib, 're': re}
|
||||
exec(code, _code_globs, local_vars)
|
||||
result = local_vars.get('result', local_vars.get('output', input_data))
|
||||
# 注入 loads/dumps;使用「globals == locals」同一命名空间 exec,
|
||||
# 避免嵌套函数 LOAD_GLOBAL 找不到仅在 locals 里的 loads,以及 json 作用域异常。
|
||||
_code_globs = {
|
||||
'__builtins__': _CODE_NODE_SAFE_BUILTINS,
|
||||
'hashlib': hashlib,
|
||||
're': re,
|
||||
'json': json,
|
||||
}
|
||||
shared_ns: Dict[str, Any] = dict(_code_globs)
|
||||
shared_ns.update(
|
||||
{
|
||||
'input_data': input_data,
|
||||
'result': None,
|
||||
'loads': json.loads,
|
||||
'dumps': json.dumps,
|
||||
}
|
||||
)
|
||||
exec(code, shared_ns, shared_ns)
|
||||
result = shared_ns.get(
|
||||
'result', shared_ns.get('output', input_data)
|
||||
)
|
||||
elif language.lower() == 'javascript':
|
||||
# JS 执行需要外部运行时,这里仅占位
|
||||
result = {
|
||||
|
||||
@@ -1,14 +1,23 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
创建或更新「学生作业管理助手」Agent:单链 Start → LLM → End。
|
||||
创建或更新「学生作业管理助手」Agent:Start → Cache 读 → Transform 合并 → LLM → Code 拆分 JSON →
|
||||
Transform 拼装 → Cache 写 → 输出。
|
||||
|
||||
侧重:记录作业项、截止日、优先级;跟进完成情况;温和督促与周回顾(不代写可提交的作业正文)。
|
||||
强化:**结构化 homework_board** 写入 `memory.context.homework_board`(Redis / 持久记忆合并)。
|
||||
|
||||
「学生作业管理助手2号」(名称含 **2号** 或 `HOMEWORK_FAST_AGENT=1`)额外侧重:**更长 Redis TTL**、收紧预算与工具轮次、默认 **deepseek-v4-flash**(可通过环境变量改)、DeepSeek **`extra_body` 关闭 thinking**(更快更稳的工具链)、Code 节点兜底避免整条失败。
|
||||
|
||||
「学生作业管理助手3号」(名称含 **3号** 或 `HOMEWORK_V3=1`):**基础设施与 2 号同档**(TTL、history 上限、8192 tokens、thinking 关闭等);提示词用**完整版**并追加 **知你客服14号记忆栈**说明(`user_memory_*`、四字段记忆包、与 `agent记忆实现方案.md` 对齐)。也可用 `scripts/create_homework_manager_agent_3.py` 一键创建。
|
||||
|
||||
用法:
|
||||
cd backend && .\\venv\\Scripts\\python.exe scripts/create_homework_manager_agent.py
|
||||
|
||||
环境变量:
|
||||
PLATFORM_BASE_URL, PLATFORM_USERNAME, PLATFORM_PASSWORD
|
||||
AGENT_NAME(默认 学生作业管理助手);示例:`AGENT_NAME=学生作业管理助手2号 HOMEWORK_LLM_MODEL=deepseek-v4-pro`
|
||||
AGENT_NAME(默认 学生作业管理助手);2 号:`AGENT_NAME=学生作业管理助手2号`;3 号:`AGENT_NAME=学生作业管理助手3号`
|
||||
HOMEWORK_FAST_AGENT=1(可选,显式启用 2 号快速档案)
|
||||
HOMEWORK_V3=1(可选,显式启用 3 号档案;通常用名称含「3号」即可)
|
||||
HOMEWORK_LLM_PROVIDER / HOMEWORK_LLM_MODEL / HOMEWORK_LLM_TIMEOUT(可选)
|
||||
"""
|
||||
from __future__ import annotations
|
||||
@@ -28,6 +37,18 @@ BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/")
|
||||
USER = os.getenv("PLATFORM_USERNAME", "admin")
|
||||
PWD = os.getenv("PLATFORM_PASSWORD", "123456")
|
||||
AGENT_NAME = os.getenv("AGENT_NAME", "学生作业管理助手")
|
||||
FAST_PROFILE = "2号" in AGENT_NAME or os.getenv("HOMEWORK_FAST_AGENT", "").strip().lower() in (
|
||||
"1",
|
||||
"true",
|
||||
"yes",
|
||||
)
|
||||
V3_PROFILE = "3号" in AGENT_NAME or os.getenv("HOMEWORK_V3", "").strip().lower() in (
|
||||
"1",
|
||||
"true",
|
||||
"yes",
|
||||
)
|
||||
# 2 号 / 3 号共享:长 TTL、较高 max_tokens、可选关闭 thinking 等与「知你类」记忆工程对齐的基础设施
|
||||
ZHINI_STYLE_INFRA = bool(FAST_PROFILE or V3_PROFILE)
|
||||
|
||||
PROVIDER = os.getenv(
|
||||
"HOMEWORK_LLM_PROVIDER", os.getenv("ENTERPRISE_LLM_PROVIDER", "deepseek")
|
||||
@@ -35,23 +56,104 @@ PROVIDER = os.getenv(
|
||||
MODEL = os.getenv(
|
||||
"HOMEWORK_LLM_MODEL", os.getenv("ENTERPRISE_LLM_MODEL", "deepseek-v4-flash")
|
||||
)
|
||||
_DEFAULT_TIMEOUT = "120" if ZHINI_STYLE_INFRA else "180"
|
||||
REQ_TIMEOUT = max(
|
||||
30,
|
||||
int(
|
||||
os.getenv(
|
||||
"HOMEWORK_LLM_TIMEOUT", os.getenv("ENTERPRISE_LLM_TIMEOUT", "180")
|
||||
"HOMEWORK_LLM_TIMEOUT",
|
||||
os.getenv("ENTERPRISE_LLM_TIMEOUT", _DEFAULT_TIMEOUT),
|
||||
)
|
||||
),
|
||||
)
|
||||
if ZHINI_STYLE_INFRA:
|
||||
REQ_TIMEOUT = min(REQ_TIMEOUT, 150)
|
||||
|
||||
BUDGET_CONFIG = {
|
||||
"max_steps": 80,
|
||||
"max_llm_invocations": 6,
|
||||
"max_tool_calls": 20,
|
||||
}
|
||||
BUDGET_CONFIG = (
|
||||
{"max_steps": 80, "max_llm_invocations": 6, "max_tool_calls": 16}
|
||||
if ZHINI_STYLE_INFRA
|
||||
else {"max_steps": 100, "max_llm_invocations": 8, "max_tool_calls": 24}
|
||||
)
|
||||
|
||||
_CACHE_TTL = 1209600 if ZHINI_STYLE_INFRA else 604800
|
||||
_MAX_HISTORY_LENGTH = 48 if ZHINI_STYLE_INFRA else 40
|
||||
|
||||
HOMEWORK_TOOLS = ["file_read", "text_analyze", "datetime", "json_process"]
|
||||
|
||||
CODE_SPLIT_HOMEWORK_TAIL_JSON = r"""
|
||||
def _tail_json_obj(s):
|
||||
if not isinstance(s, str):
|
||||
return None
|
||||
t = s.strip()
|
||||
if not t:
|
||||
return None
|
||||
last_nl = t.rfind("\n")
|
||||
last_line = t[last_nl + 1 :].strip() if last_nl >= 0 else t
|
||||
if not last_line.startswith("{"):
|
||||
return None
|
||||
try:
|
||||
o = loads(last_line)
|
||||
return o if isinstance(o, dict) else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _llm_text(inp):
|
||||
if isinstance(inp, str):
|
||||
return inp
|
||||
if isinstance(inp, dict):
|
||||
out = inp.get("output")
|
||||
if isinstance(out, str):
|
||||
return out
|
||||
if isinstance(out, dict):
|
||||
return str(out.get("output") or out.get("text") or out.get("content") or "")
|
||||
if out is not None:
|
||||
return str(out)
|
||||
return str(inp)
|
||||
|
||||
|
||||
try:
|
||||
raw = _llm_text(input_data)
|
||||
obj = _tail_json_obj(raw)
|
||||
hb = {}
|
||||
if obj:
|
||||
hb = obj.get("homework_board")
|
||||
if not isinstance(hb, dict):
|
||||
hb = {}
|
||||
reply_visible = raw.strip() if isinstance(raw, str) else str(raw).strip()
|
||||
if obj and isinstance(raw, str):
|
||||
lines = raw.splitlines()
|
||||
while lines and not lines[-1].strip():
|
||||
lines.pop()
|
||||
if lines and lines[-1].strip().startswith("{"):
|
||||
lines.pop()
|
||||
reply_visible = "\n".join(lines).strip()
|
||||
result = {"reply": reply_visible, "homework_board": hb}
|
||||
except Exception:
|
||||
try:
|
||||
_raw = _llm_text(input_data)
|
||||
_reply = (_raw.strip() if isinstance(_raw, str) else str(_raw)).strip()
|
||||
except Exception:
|
||||
_reply = ""
|
||||
result = {"reply": _reply, "homework_board": {}}
|
||||
"""
|
||||
|
||||
# 与 agent记忆实现方案 / 知你客服线对齐:末行 JSON 含 user_profile、禁止无视已有快照与对话
|
||||
HOMEWORK_PROMPT_ZHINI_ALIGN = """
|
||||
【与知你记忆方案对齐 · 必守】
|
||||
- 末行单行 JSON 须**完整可解析**。除 `homework_board` 外**必须**含 `user_profile`:用户若已说「我叫…」「我的名字是…」「叫我…」等,须写入 "user_profile":{"name":"…"};未获知则 "user_profile":{}。
|
||||
- 先读上方「最近对话」「作业快照」再作答:用户问「有什么作业」「我有什么语文作业」等时,若快照或对话里**已有**科目/条目,须**逐条复述**,禁止说「没有记录」「暂时没有」或逼用户从零重述,除非快照与对话确为空。
|
||||
- 防截断:表格与寒暄从简;**宁可少写修饰语也不得省略末行 JSON**;`homework_board.items` 与正文已列条数一致,禁止用空 `items` 覆盖历史条目。
|
||||
"""
|
||||
|
||||
# 仅 3 号追加:显式对标知你客服 14 号 / agent记忆实现方案 中的记忆栈描述
|
||||
HOMEWORK_V3_ZHINI14_APPEND = """
|
||||
【3号 · 知你客服14号记忆方案(工程对齐)】
|
||||
- 与知你客服14号、`agent记忆实现方案.md` 一致:**Cache 键** `user_memory_{user_id}`;执行须带稳定 **`user_id`**(预览端按 Agent 维度持久化),避免退化为 `default` 串会话。
|
||||
- **记忆包四字段**:`conversation_history`、`conversation_summary`、`user_profile`、`context`;作业结构化数据在 **`context.homework_board`**(与 2 号相同);引擎对末行 JSON 的 `user_profile` 与 Cache 合并逻辑与知你主线一致。
|
||||
- **Redis + 可选 MySQL**:节点 TTL 见配置;平台开启 `MEMORY_PERSIST_DB_ENABLED` 时与 `persistent_user_memories` 对齐合并,冷启动仍可拉回。
|
||||
"""
|
||||
|
||||
|
||||
def _homework_prompt(agent_display_name: str) -> str:
|
||||
return f"""你是「{agent_display_name}」,帮助学生**记作业**与**监督完成**,语气友好、具体、可执行。
|
||||
@@ -72,6 +174,40 @@ def _homework_prompt(agent_display_name: str) -> str:
|
||||
【交互习惯】
|
||||
- 用户只说「记一下数学作业」时,主动追问截止日与具体要求(一次问 1–2 个点,避免审问感)。
|
||||
- 用户汇报「做完了」时,确认是否需拍照/上传检查清单,并建议归档到下一条任务前的小结一句话。
|
||||
|
||||
【持久记忆(必须利用)】
|
||||
- 当前用户画像:{{memory.user_profile}}
|
||||
- 历史摘要:{{memory.conversation_summary}}
|
||||
- 最近历史:{{memory.conversation_history}}
|
||||
- **已知结构化作业快照(优先以此为准,可与正文互相补充)**:{{memory.context.homework_board}}
|
||||
- 回答前先结合历史判断:本轮是否在“延续上一轮作业条目”。若是,不要重复问已确认信息(如科目、截止日期)。
|
||||
- 若上一轮你已经列出作业清单,而本轮用户只补充了「截止时间/科目/完成状态」中的一部分,必须把该信息回填到上一轮清单并给出“更新后的清单”;禁止再问“具体有哪些作业”。
|
||||
- 当历史中已出现明确作业条目(如 4 条作业列表)时,默认这些条目继续有效,除非用户明确说“作业变了/重置”。
|
||||
{HOMEWORK_PROMPT_ZHINI_ALIGN}
|
||||
【结构化记忆(强制 · 机器可读)】
|
||||
- 在正文结束后,**最后单独一行**输出**恰好一行**合法 JSON(勿 markdown 围栏),格式示例:
|
||||
{{"homework_board":{{"subject":"语文","deadline_text":"2026-05-01","items":[{{"title":"写生字","detail":"第八课"}}],"notes":""}},"user_profile":{{}}}}
|
||||
- `homework_board` 必须与正文一致;若本轮用户只补充截止日/科目,须在 `homework_board` 中**合并更新**已有 `items`(可参考上面的快照与对话),**禁止用空列表覆盖已有条目**。
|
||||
- 该行仅供系统解析;正文不要复述该行 JSON。
|
||||
"""
|
||||
|
||||
|
||||
def _homework_prompt_fast(agent_display_name: str) -> str:
|
||||
return f"""你是「{agent_display_name}」,帮助学生**记作业**与**跟进度**;回复简短、可执行、中文优先。
|
||||
|
||||
【持久记忆 — 先读后答】
|
||||
- 画像:{{memory.user_profile}}
|
||||
- 摘要:{{memory.conversation_summary}}
|
||||
- 最近对话:{{memory.conversation_history}}
|
||||
- **作业快照 homework_board(优先采信,勿臆测)**:{{memory.context.homework_board}}
|
||||
|
||||
【工具 — 省延迟】仅当消息里出现**上传文件的工作区路径列表**时才调用 file_read;无附件时不要调用 file_read。需要当前时间用 datetime;结构化整理可用 json_process。
|
||||
|
||||
【原则】不代写可提交正文;延续上一轮时不要重复追问已确认的科目/清单;用户只改截止日或状态时合并更新清单。
|
||||
{HOMEWORK_PROMPT_ZHINI_ALIGN}
|
||||
【末行 JSON — 强制】正文结束后**单独一行**合法 JSON(勿 markdown 围栏),例如:
|
||||
{{"homework_board":{{"subject":"…","deadline_text":"…","items":[{{"title":"…","detail":"…"}}],"notes":"…"}},"user_profile":{{}}}}
|
||||
须与正文一致;**合并**已有 items,禁止用空列表覆盖历史条目。
|
||||
"""
|
||||
|
||||
|
||||
@@ -97,32 +233,148 @@ def _sanitize_edges(edges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
|
||||
|
||||
def build_workflow() -> Dict[str, Any]:
|
||||
llm_pos: Tuple[int, int] = (380, 220)
|
||||
llm_pos: Tuple[int, int] = (680, 220)
|
||||
if FAST_PROFILE:
|
||||
_prompt = _homework_prompt_fast(AGENT_NAME)
|
||||
elif V3_PROFILE:
|
||||
_prompt = _homework_prompt(AGENT_NAME) + HOMEWORK_V3_ZHINI14_APPEND
|
||||
else:
|
||||
_prompt = _homework_prompt(AGENT_NAME)
|
||||
_llm_temp = 0.22 if FAST_PROFILE else (0.25 if V3_PROFILE else 0.3)
|
||||
_llm_mti = 6 if FAST_PROFILE else (8 if V3_PROFILE else 10)
|
||||
_llm_data: Dict[str, Any] = {
|
||||
"label": "作业管理",
|
||||
"prompt": _prompt,
|
||||
"provider": PROVIDER,
|
||||
"model": MODEL,
|
||||
"temperature": _llm_temp,
|
||||
"request_timeout": REQ_TIMEOUT,
|
||||
"enable_tools": True,
|
||||
"tools": list(HOMEWORK_TOOLS),
|
||||
"selected_tools": list(HOMEWORK_TOOLS),
|
||||
"max_tool_iterations": _llm_mti,
|
||||
}
|
||||
if ZHINI_STYLE_INFRA:
|
||||
# 避免截断末行 JSON → homework_board / user_profile 无法落库
|
||||
_llm_data["max_tokens"] = 8192
|
||||
if ZHINI_STYLE_INFRA and PROVIDER.strip().lower() == "deepseek":
|
||||
_llm_data["extra_body"] = {"thinking": {"type": "disabled"}}
|
||||
|
||||
nodes: List[Dict[str, Any]] = [
|
||||
{"id": "start-1", "type": "start", "position": {"x": 80, "y": 220}, "data": {"label": "开始"}},
|
||||
{
|
||||
"id": "cache-query",
|
||||
"type": "cache",
|
||||
"position": {"x": 300, "y": 220},
|
||||
"data": {
|
||||
"label": "读取记忆",
|
||||
"operation": "get",
|
||||
"key": "user_memory_{user_id}",
|
||||
"ttl": _CACHE_TTL,
|
||||
"default_value": "{\"conversation_history\": [], \"conversation_summary\": \"\", \"user_profile\": {}, \"context\": {}}",
|
||||
"input_variables": [],
|
||||
"output_variables": [],
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": "transform-merge",
|
||||
"type": "transform",
|
||||
"position": {"x": 510, "y": 220},
|
||||
"data": {
|
||||
"label": "合并输入与记忆",
|
||||
"mode": "merge",
|
||||
"mapping": {
|
||||
"query": "{{query}}",
|
||||
"user_input": "{{query}}",
|
||||
"user_id": "{{user_id}}",
|
||||
"timestamp": "{{timestamp}}",
|
||||
"attachments": "{{attachments}}",
|
||||
"memory": "{{output}}",
|
||||
"conversation_history": "{{output.conversation_history}}",
|
||||
"user_profile": "{{output.user_profile}}",
|
||||
"context": "{{output.context}}",
|
||||
},
|
||||
"input_variables": [],
|
||||
"output_variables": [],
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": "llm-homework",
|
||||
"type": "llm",
|
||||
"position": {"x": llm_pos[0], "y": llm_pos[1]},
|
||||
"data": dict(_llm_data),
|
||||
},
|
||||
{
|
||||
"id": "code-split-homework-json",
|
||||
"type": "code",
|
||||
"position": {"x": llm_pos[0] + 260, "y": 220},
|
||||
"data": {
|
||||
"label": "作业管理",
|
||||
"prompt": _homework_prompt(AGENT_NAME),
|
||||
"provider": PROVIDER,
|
||||
"model": MODEL,
|
||||
"temperature": 0.3,
|
||||
"request_timeout": REQ_TIMEOUT,
|
||||
"enable_tools": True,
|
||||
"tools": list(HOMEWORK_TOOLS),
|
||||
"selected_tools": list(HOMEWORK_TOOLS),
|
||||
"max_tool_iterations": 10,
|
||||
"label": "拆分正文与homework_board",
|
||||
"language": "python",
|
||||
"code": CODE_SPLIT_HOMEWORK_TAIL_JSON,
|
||||
"timeout": 20,
|
||||
},
|
||||
},
|
||||
{"id": "end-1", "type": "end", "position": {"x": llm_pos[0] + 260, "y": 220}, "data": {"label": "结束"}},
|
||||
{
|
||||
"id": "transform-build-append",
|
||||
"type": "transform",
|
||||
"position": {"x": llm_pos[0] + 520, "y": 220},
|
||||
"data": {
|
||||
"label": "拼装记忆更新",
|
||||
"mode": "merge",
|
||||
"mapping": {
|
||||
"query": "{{query}}",
|
||||
"user_input": "{{user_input}}",
|
||||
"user_id": "{{user_id}}",
|
||||
"timestamp": "{{timestamp}}",
|
||||
"memory": "{{memory}}",
|
||||
"output": "{{reply}}",
|
||||
"homework_board_update": "{{homework_board}}",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": "cache-update-append",
|
||||
"type": "cache",
|
||||
"position": {"x": llm_pos[0] + 780, "y": 220},
|
||||
"data": {
|
||||
"label": "写回记忆(追加)",
|
||||
"operation": "set",
|
||||
"key": "user_memory_{user_id}",
|
||||
"ttl": _CACHE_TTL,
|
||||
"max_history_length": _MAX_HISTORY_LENGTH,
|
||||
"value": "{\"conversation_summary\": (memory.get(\"conversation_summary\") or \"\"), \"conversation_history\": (memory.get(\"conversation_history\") or []) + [{\"role\": \"user\", \"content\": \"{{user_input}}\", \"timestamp\": \"{{timestamp}}\"}, {\"role\": \"assistant\", \"content\": \"{{output}}\", \"timestamp\": \"{{timestamp}}\"}], \"user_profile\": memory.get(\"user_profile\", {}), \"context\": memory.get(\"context\", {})}",
|
||||
"input_variables": [],
|
||||
"output_variables": [],
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": "transform-output-format",
|
||||
"type": "transform",
|
||||
"position": {"x": llm_pos[0] + 1040, "y": 220},
|
||||
"data": {
|
||||
"label": "输出格式",
|
||||
"mode": "merge",
|
||||
"mapping": {
|
||||
"reply": "{{output}}",
|
||||
"output": "{{output}}",
|
||||
"result": "{{output}}",
|
||||
},
|
||||
},
|
||||
},
|
||||
{"id": "end-1", "type": "end", "position": {"x": llm_pos[0] + 1300, "y": 220}, "data": {"label": "结束", "output_format": "text"}},
|
||||
]
|
||||
edges = _sanitize_edges(
|
||||
[
|
||||
{"source": "start-1", "target": "llm-homework", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "llm-homework", "target": "end-1", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "start-1", "target": "cache-query", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "cache-query", "target": "transform-merge", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "transform-merge", "target": "llm-homework", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "transform-merge", "target": "transform-build-append", "sourceHandle": "left", "targetHandle": "left"},
|
||||
{"source": "llm-homework", "target": "code-split-homework-json", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "code-split-homework-json", "target": "transform-build-append", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "transform-build-append", "target": "cache-update-append", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "cache-update-append", "target": "transform-output-format", "sourceHandle": "right", "targetHandle": "left"},
|
||||
{"source": "transform-output-format", "target": "end-1", "sourceHandle": "right", "targetHandle": "left"},
|
||||
]
|
||||
)
|
||||
return {"nodes": nodes, "edges": edges}
|
||||
@@ -170,10 +422,26 @@ def main() -> int:
|
||||
return 1
|
||||
h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
|
||||
|
||||
_max_tool_it = 6 if FAST_PROFILE else (8 if V3_PROFILE else 10)
|
||||
if FAST_PROFILE:
|
||||
_profile_note = (
|
||||
f"快速档案(2号):TTL {_CACHE_TTL}s,history≤{_MAX_HISTORY_LENGTH},工具轮≤{_max_tool_it},"
|
||||
f"budget {BUDGET_CONFIG};DeepSeek 关闭 thinking(若适用)。"
|
||||
)
|
||||
elif V3_PROFILE:
|
||||
_profile_note = (
|
||||
f"3号:基于2号基础设施(TTL {_CACHE_TTL}s,history≤{_MAX_HISTORY_LENGTH},"
|
||||
f"工具轮≤{_max_tool_it},max_tokens 8192,budget {BUDGET_CONFIG})+ "
|
||||
"知你客服14号记忆方案(user_memory_*、四字段、MySQL 可选);完整提示词 + 记忆栈说明。"
|
||||
)
|
||||
else:
|
||||
_profile_note = ""
|
||||
desc = (
|
||||
f"{AGENT_NAME}:记作业(科目、内容、截止日)、跟进度、温和督促与周回顾;"
|
||||
"支持上传文件/照片后用 file_read 提取正文(文本、PDF、docx、xlsx、图片 OCR)与 json_process 整理;"
|
||||
f"默认模型 {PROVIDER}/{MODEL},单次执行内工具迭代上限 10。"
|
||||
f"默认模型 {PROVIDER}/{MODEL},单次执行内工具迭代上限 {_max_tool_it};"
|
||||
"持久记忆:Redis/cache + conversation_history;结构化 homework_board 写入 memory.context(末行 JSON)。 "
|
||||
+ _profile_note
|
||||
)
|
||||
|
||||
existing = _find_agent_id(h, AGENT_NAME)
|
||||
|
||||
38
backend/scripts/create_homework_manager_agent_3.py
Normal file
38
backend/scripts/create_homework_manager_agent_3.py
Normal file
@@ -0,0 +1,38 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
一键创建或更新「学生作业管理助手3号」:
|
||||
|
||||
- 画布与作业链路与 2 号相同(Cache user_memory_* → LLM → Code 拆 JSON → 写回)。
|
||||
- **基础设施**与 2 号同档(长 TTL、history 上限、8192 max_tokens、DeepSeek 关闭 thinking 等)。
|
||||
- **记忆方案**显式对齐知你客服 14 号 / `agent记忆实现方案.md`(见主脚本内 HOMEWORK_V3_ZHINI14_APPEND + 完整版提示词)。
|
||||
|
||||
等价于:
|
||||
AGENT_NAME=学生作业管理助手3号 .\\venv\\Scripts\\python.exe scripts\\create_homework_manager_agent.py
|
||||
|
||||
用法:
|
||||
cd backend && .\\venv\\Scripts\\python.exe scripts\\create_homework_manager_agent_3.py
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
import os
|
||||
import sys
|
||||
|
||||
BACKEND_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
if BACKEND_DIR not in sys.path:
|
||||
sys.path.insert(0, BACKEND_DIR)
|
||||
|
||||
|
||||
def _run() -> int:
|
||||
# 必须覆盖外层 shell 里可能残留的 AGENT_NAME(如 2 号),否则会误改 2 号
|
||||
os.environ["AGENT_NAME"] = "学生作业管理助手3号"
|
||||
path = os.path.join(os.path.dirname(__file__), "create_homework_manager_agent.py")
|
||||
spec = importlib.util.spec_from_file_location("_homework_agent_mod", path)
|
||||
mod = importlib.util.module_from_spec(spec)
|
||||
assert spec.loader is not None
|
||||
spec.loader.exec_module(mod)
|
||||
return int(mod.main())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(_run())
|
||||
Reference in New Issue
Block a user