fix: resolve unresponsive Agent streaming chat and tool schema compatibility issues

- Yield a `think` event in `run_stream()` before the LLM call, so the frontend immediately shows a "Thinking..." indicator (sketched below)
- Fix the tool schema normalization logic: a `{"function":{...}}` shape missing the `type` field caused the LLM API to reject the request (sketched below)
- Load custom tools from the database at startup (`load_tools_from_db`), fixing custom tools being lost after a restart (sketched below)
- Add a 60s timeout guard to the frontend SSE handling; any event type now triggers `receivedFirstEvent` (a Python analog is sketched below)
- Automatically fall back to a non-streaming POST when streaming fails
- Add `scripts/seed_coding_agent.py` and `scripts/test_coding_agent.py`

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Author:  renjianbo
Date:    2026-05-02 00:38:41 +08:00
Commit:  7aba0f9bc5
Parent:  342f3fcb16

10 changed files with 662 additions and 36 deletions
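
For reference, a minimal sketch of the first fix: emit a `think` event before the (potentially slow) first LLM call so the UI is never blank. The event payload shape and the `call_llm_stream` helper are illustrative stand-ins, not the actual runtime API.

```python
import asyncio
import json
from typing import Any, AsyncIterator


def sse(event: dict[str, Any]) -> str:
    # One event in SSE wire format.
    return f"data: {json.dumps(event, ensure_ascii=False)}\n\n"


async def call_llm_stream(message: str) -> AsyncIterator[str]:
    # Stand-in for the real LLM client; streams a canned reply.
    for word in ("Hello", ", ", "world"):
        await asyncio.sleep(0.1)  # simulate network latency
        yield word


async def run_stream(message: str) -> AsyncIterator[str]:
    # Yield "think" *before* the LLM call so the frontend can render a
    # "Thinking..." indicator instead of waiting silently for the first token.
    yield sse({"type": "think"})
    async for chunk in call_llm_stream(message):
        yield sse({"type": "token", "content": chunk})
```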
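
The schema compatibility fix boils down to filling in the `type` discriminator that OpenAI-compatible tool-calling APIs require on every tool definition. A sketch of that kind of normalization (the function name is hypothetical):

```python
from typing import Any


def normalize_tool_schema(tool: dict[str, Any]) -> dict[str, Any]:
    # OpenAI-compatible APIs expect {"type": "function", "function": {...}};
    # a bare {"function": {...}} without "type" is rejected.
    if "function" in tool:
        return tool if "type" in tool else {"type": "function", **tool}
    # Bare function schema ({"name": ..., "parameters": ...}): wrap it.
    return {"type": "function", "function": tool}


assert normalize_tool_schema({"function": {"name": "grep"}})["type"] == "function"
```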
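
Startup loading is the usual "rehydrate from the database" pattern. A sketch assuming a SQLAlchemy model and a plain dict registry (both hypothetical names, reusing `normalize_tool_schema` from the previous sketch):

```python
from sqlalchemy import JSON, Column, Integer
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class CustomTool(Base):
    # Hypothetical model; the real table lives in the app's models module.
    __tablename__ = "custom_tools"
    id = Column(Integer, primary_key=True)
    schema_json = Column(JSON, nullable=False)


def load_tools_from_db(db: Session, registry: dict[str, dict]) -> None:
    # Called once at startup: re-register every persisted custom tool so
    # definitions survive a process restart instead of living only in memory.
    for row in db.query(CustomTool).all():
        schema = normalize_tool_schema(row.schema_json)
        registry[schema["function"]["name"]] = schema
```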
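
The timeout-plus-fallback guard itself lives in the JS frontend; here is the same idea sketched in Python with httpx (endpoint paths and payload fields are illustrative):

```python
import httpx

API = "http://localhost:8000"  # illustrative base URL


async def chat_with_fallback(payload: dict) -> str:
    try:
        # 60s read timeout: if no event (of any type) arrives in time, abort.
        # Because "think" is now emitted first, a healthy stream passes this
        # gate almost immediately -- the analog of receivedFirstEvent.
        async with httpx.AsyncClient(timeout=httpx.Timeout(60.0)) as client:
            async with client.stream("POST", f"{API}/chat/stream", json=payload) as resp:
                chunks = []
                async for line in resp.aiter_lines():
                    if line.startswith("data: "):
                        chunks.append(line[len("data: "):])
                return "".join(chunks)
    except (httpx.TimeoutException, httpx.StreamError):
        # Streaming failed or stalled: degrade to a plain non-streaming POST.
        async with httpx.AsyncClient(timeout=httpx.Timeout(120.0)) as client:
            resp = await client.post(f"{API}/chat", json=payload)
            return resp.json()["content"]
```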


@@ -189,6 +189,8 @@ async def chat_bare(
     db: Session = Depends(get_db),
 ):
     """Chat directly using default settings; no Agent configuration required."""
+    uid = current_user.id
+    bare_scope = f"{uid}:__bare__" if uid else "__bare__"
     config = AgentConfig(
         name="bare_agent",
         system_prompt="You are a helpful AI assistant. Please use the available tools to help the user complete tasks.",
@@ -200,7 +202,8 @@ async def chat_bare(
             temperature=req.temperature or 0.7,
             max_iterations=req.max_iterations or 10,
         ),
-        user_id=current_user.id,
+        user_id=uid,
+        memory_scope_id=bare_scope,
     )
     on_llm_call = _make_llm_logger(db, agent_id=None, user_id=current_user.id)
     runtime = AgentRuntime(config=config, on_llm_call=on_llm_call)
@@ -223,6 +226,8 @@ async def chat_bare_stream(
     db: Session = Depends(get_db),
 ):
     """Chat directly using default settings; no Agent configuration required (streaming SSE)."""
+    uid = current_user.id
+    bare_scope = f"{uid}:__bare__" if uid else "__bare__"
     config = AgentConfig(
         name="bare_agent",
         system_prompt="You are a helpful AI assistant. Please use the available tools to help the user complete tasks.",
@@ -234,7 +239,8 @@ async def chat_bare_stream(
             temperature=req.temperature or 0.7,
             max_iterations=req.max_iterations or 10,
         ),
-        user_id=current_user.id,
+        user_id=uid,
+        memory_scope_id=bare_scope,
     )
     on_llm_call = _make_llm_logger(db, agent_id=None, user_id=current_user.id)
     runtime = AgentRuntime(config=config, on_llm_call=on_llm_call)
@@ -285,6 +291,8 @@ async def chat_with_agent(
     if "max_tool_calls" in bc and bc["max_tool_calls"] is not None:
         budget.max_tool_calls = max(1, int(bc["max_tool_calls"]))
+    uid = current_user.id
+    mem_scope = f"{uid}:{agent_id}" if uid else str(agent_id)
     config = AgentConfig(
         name=agent.name,
         system_prompt=system_prompt,
@@ -299,7 +307,8 @@ async def chat_with_agent(
             exclude_tools=agent_node_cfg.get("exclude_tools", []),
         ),
         budget=budget,
-        user_id=current_user.id,
+        user_id=uid,
+        memory_scope_id=mem_scope,
     )
     on_llm_call = _make_llm_logger(db, agent_id=agent_id, user_id=current_user.id)
@@ -349,6 +358,8 @@ async def chat_with_agent_stream(
     if "max_tool_calls" in bc and bc["max_tool_calls"] is not None:
         budget.max_tool_calls = max(1, int(bc["max_tool_calls"]))
+    uid = current_user.id
+    mem_scope = f"{uid}:{agent_id}" if uid else str(agent_id)
     config = AgentConfig(
         name=agent.name,
         system_prompt=system_prompt,
@@ -363,7 +374,8 @@ async def chat_with_agent_stream(
             exclude_tools=agent_node_cfg.get("exclude_tools", []),
         ),
         budget=budget,
-        user_id=current_user.id,
+        user_id=uid,
+        memory_scope_id=mem_scope,
     )
     on_llm_call = _make_llm_logger(db, agent_id=agent_id, user_id=current_user.id)
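
The change repeated across all four hunks is the new `memory_scope_id`: conversation memory is now keyed by user id plus agent (or `__bare__` for the config-free endpoints), so two users talking to the same agent no longer share history. A toy illustration of the keying, against a hypothetical in-memory store:

```python
from collections import defaultdict

# Hypothetical history store keyed by memory_scope_id.
histories: dict[str, list[str]] = defaultdict(list)


def scope_id(uid: int | None, agent_id: int | str = "__bare__") -> str:
    # Same derivation as the diff: "<uid>:<agent>" when authenticated,
    # the agent id alone for anonymous sessions.
    return f"{uid}:{agent_id}" if uid else str(agent_id)


histories[scope_id(1, 42)].append("user 1: hello")
histories[scope_id(2, 42)].append("user 2: hi")
assert histories[scope_id(1, 42)] != histories[scope_id(2, 42)]  # no cross-user leakage
```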