fix: 修复 Agent 流式对话无响应和工具 schema 兼容性问题

- 在 `run_stream()` LLM 调用前 yield `think` 事件,前端即时显示"思考中..."
- 修复 tool schema 规范化逻辑:`{"function":{...}}` 格式缺少 `type` 字段导致 LLM API 拒绝
- 启动时从数据库加载自定义工具(`load_tools_from_db`),解决重启后工具丢失
- 前端 SSE 添加 60s 超时保护,任何事件类型均触发 `receivedFirstEvent`
- 流式失败自动降级到非流式 POST
- 添加 `scripts/seed_coding_agent.py` 和 `scripts/test_coding_agent.py`

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
renjianbo
2026-05-02 00:38:41 +08:00
parent 342f3fcb16
commit 7aba0f9bc5
10 changed files with 662 additions and 36 deletions

View File

@@ -83,8 +83,9 @@ class AgentRuntime:
system_prompt=self.config.system_prompt,
user_id=self.config.user_id,
)
_mem_scope = self.config.memory_scope_id or self.config.user_id or self.config.name
self.memory = memory or AgentMemory(
scope_id=self.config.user_id or self.config.name,
scope_id=_mem_scope,
max_history=self.config.memory.max_history_messages,
persist=self.config.memory.persist_to_db,
)
@@ -398,6 +399,9 @@ class AgentRuntime:
"truncated": True}
return
# think 事件:告知前端 Agent 正在思考(让 UI 即时反馈,避免假死感)
yield {"type": "think", "content": "", "reasoning": None, "iteration": self.context.iteration}
# 调用 LLM
try:
response = await llm.chat(
@@ -687,7 +691,24 @@ class _LLMClient:
if self._config.extra_body:
kwargs["extra_body"] = self._config.extra_body
if tools:
kwargs["tools"] = tools
# Normalize tool schemas to OpenAI format: custom tools from the
# marketplace may be stored as {"name":..., "parameters":...}
# or {"function":{...}} without the required "type": "function".
normalized = []
for t in tools:
if isinstance(t, dict):
if t.get("type") == "function":
# Already in correct format: {"type":"function","function":{...}}
normalized.append(t)
elif "function" in t:
# Has function key but missing type: {"function":{...}}
normalized.append({"type": "function", "function": t["function"]})
else:
# Raw schema: {"name":..., "parameters":...}
normalized.append({"type": "function", "function": t})
else:
normalized.append(t)
kwargs["tools"] = normalized
kwargs["tool_choice"] = "auto"
start_time = time.perf_counter()

View File

@@ -52,6 +52,8 @@ class AgentConfig(BaseModel):
memory: AgentMemoryConfig = Field(default_factory=AgentMemoryConfig)
budget: AgentBudgetConfig = Field(default_factory=AgentBudgetConfig)
user_id: Optional[str] = None
# 持久记忆 / 向量记忆的 scope_id(不设时沿用 user_id 或 name,易与其他 Agent 串记忆)
memory_scope_id: Optional[str] = None
class AgentMessage(BaseModel):

View File

@@ -189,6 +189,8 @@ async def chat_bare(
db: Session = Depends(get_db),
):
"""无需 Agent 配置,使用默认设置直接对话。"""
uid = current_user.id
bare_scope = f"{uid}:__bare__" if uid else "__bare__"
config = AgentConfig(
name="bare_agent",
system_prompt="你是一个有用的AI助手。请使用可用工具来帮助用户完成任务。",
@@ -200,7 +202,8 @@ async def chat_bare(
temperature=req.temperature or 0.7,
max_iterations=req.max_iterations or 10,
),
user_id=current_user.id,
user_id=uid,
memory_scope_id=bare_scope,
)
on_llm_call = _make_llm_logger(db, agent_id=None, user_id=current_user.id)
runtime = AgentRuntime(config=config, on_llm_call=on_llm_call)
@@ -223,6 +226,8 @@ async def chat_bare_stream(
db: Session = Depends(get_db),
):
"""无需 Agent 配置,使用默认设置直接对话(流式 SSE)"""
uid = current_user.id
bare_scope = f"{uid}:__bare__" if uid else "__bare__"
config = AgentConfig(
name="bare_agent",
system_prompt="你是一个有用的AI助手。请使用可用工具来帮助用户完成任务。",
@@ -234,7 +239,8 @@ async def chat_bare_stream(
temperature=req.temperature or 0.7,
max_iterations=req.max_iterations or 10,
),
user_id=current_user.id,
user_id=uid,
memory_scope_id=bare_scope,
)
on_llm_call = _make_llm_logger(db, agent_id=None, user_id=current_user.id)
runtime = AgentRuntime(config=config, on_llm_call=on_llm_call)
@@ -285,6 +291,8 @@ async def chat_with_agent(
if "max_tool_calls" in bc and bc["max_tool_calls"] is not None:
budget.max_tool_calls = max(1, int(bc["max_tool_calls"]))
uid = current_user.id
mem_scope = f"{uid}:{agent_id}" if uid else str(agent_id)
config = AgentConfig(
name=agent.name,
system_prompt=system_prompt,
@@ -299,7 +307,8 @@ async def chat_with_agent(
exclude_tools=agent_node_cfg.get("exclude_tools", []),
),
budget=budget,
user_id=current_user.id,
user_id=uid,
memory_scope_id=mem_scope,
)
on_llm_call = _make_llm_logger(db, agent_id=agent_id, user_id=current_user.id)
@@ -349,6 +358,8 @@ async def chat_with_agent_stream(
if "max_tool_calls" in bc and bc["max_tool_calls"] is not None:
budget.max_tool_calls = max(1, int(bc["max_tool_calls"]))
uid = current_user.id
mem_scope = f"{uid}:{agent_id}" if uid else str(agent_id)
config = AgentConfig(
name=agent.name,
system_prompt=system_prompt,
@@ -363,7 +374,8 @@ async def chat_with_agent_stream(
exclude_tools=agent_node_cfg.get("exclude_tools", []),
),
budget=budget,
user_id=current_user.id,
user_id=uid,
memory_scope_id=mem_scope,
)
on_llm_call = _make_llm_logger(db, agent_id=agent_id, user_id=current_user.id)

View File

@@ -200,6 +200,18 @@ async def startup_event():
logger.error(f"内置工具注册失败: {e}")
# 不抛出异常,允许应用继续启动
# 加载自定义工具(从数据库同步到注册表)
try:
from app.core.database import SessionLocal
db = SessionLocal()
try:
tool_registry.load_tools_from_db(db)
logger.info("自定义工具加载完成count=%s", len(tool_registry._tool_schemas) - tool_registry.builtin_tool_count())
finally:
db.close()
except Exception as e:
logger.error(f"自定义工具加载失败: {e}")
# 注册路由
from app.api import auth, uploads, workflows, executions, websocket, execution_logs, data_sources, agents, platform_templates, model_configs, webhooks, template_market, batch_operations, collaboration, permissions, monitoring, alert_rules, node_test, node_templates, tools, agent_chat, agent_monitoring, knowledge_base

View File

@@ -21,6 +21,20 @@ _DSML_NO_ARG_TOOLS = {
}
def _chat_messages(
user_content: str,
system_prompt: Optional[str] = None,
) -> List[Dict[str, str]]:
"""构建 chat.completions messages可选 system + user。"""
msgs: List[Dict[str, str]] = []
if system_prompt is not None:
sp = system_prompt.strip()
if sp:
msgs.append({"role": "system", "content": sp})
msgs.append({"role": "user", "content": user_content})
return msgs
def _resolve_request_timeout(kwargs: Dict[str, Any]) -> float:
"""
统一解析 LLM 请求超时时间(秒):
@@ -415,6 +429,7 @@ class LLMService:
max_tokens: Optional[int] = None,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
system_prompt: Optional[str] = None,
**kwargs
) -> str:
"""
@@ -470,9 +485,7 @@ class LLMService:
try:
response = await client.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": prompt}
],
messages=_chat_messages(prompt, system_prompt),
temperature=temperature,
max_tokens=max_tokens,
timeout=request_timeout,
@@ -507,6 +520,7 @@ class LLMService:
max_tokens: Optional[int] = None,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
system_prompt: Optional[str] = None,
**kwargs
) -> str:
"""
@@ -566,9 +580,7 @@ class LLMService:
try:
response = await client.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": prompt}
],
messages=_chat_messages(prompt, system_prompt),
temperature=temperature,
max_tokens=max_tokens,
timeout=request_timeout,
@@ -602,6 +614,7 @@ class LLMService:
model: Optional[str] = None,
temperature: float = 0.7,
max_tokens: Optional[int] = None,
system_prompt: Optional[str] = None,
**kwargs
) -> str:
"""
@@ -627,6 +640,7 @@ class LLMService:
model=model,
temperature=temperature,
max_tokens=max_tokens,
system_prompt=system_prompt,
**kwargs
)
elif provider == "deepseek":
@@ -638,6 +652,7 @@ class LLMService:
model=model,
temperature=temperature,
max_tokens=max_tokens,
system_prompt=system_prompt,
**kwargs
)
else:
@@ -658,6 +673,7 @@ class LLMService:
on_tool_executed: Optional[Callable[[str], Awaitable[None]]] = None,
request_timeout: Optional[float] = None,
extra_body: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
) -> str:
"""
调用OpenAI API支持工具调用
@@ -693,7 +709,7 @@ class LLMService:
raise ValueError("OpenAI API Key未配置")
client = self.openai_client
messages = [{"role": "user", "content": prompt}]
messages = _chat_messages(prompt, system_prompt)
request_timeout = _resolve_request_timeout(
{"request_timeout": request_timeout} if request_timeout is not None else {}
@@ -716,10 +732,11 @@ class LLMService:
openai_tools = []
for tool in tools:
if isinstance(tool, dict):
if "type" in tool and tool["type"] == "function":
if tool.get("type") == "function":
openai_tools.append(tool)
elif "function" in tool:
openai_tools.append(tool)
# Has function but missing type
openai_tools.append({"type": "function", "function": tool["function"]})
else:
# 假设是function格式包装一下
openai_tools.append({
@@ -884,6 +901,7 @@ class LLMService:
on_tool_executed: Optional[Callable[[str], Awaitable[None]]] = None,
request_timeout: Optional[float] = None,
extra_body: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
) -> str:
"""
调用DeepSeek API支持工具调用DeepSeek兼容OpenAI API格式
@@ -903,6 +921,7 @@ class LLMService:
on_tool_executed=on_tool_executed,
request_timeout=request_timeout,
extra_body=extra_body,
system_prompt=system_prompt,
)
async def call_llm_with_tools(
@@ -917,6 +936,7 @@ class LLMService:
tool_choice: Optional[str] = None,
on_tool_executed: Optional[Callable[[str], Awaitable[None]]] = None,
request_timeout: Optional[float] = None,
system_prompt: Optional[str] = None,
**kwargs
) -> str:
"""
@@ -947,6 +967,7 @@ class LLMService:
tool_choice=tool_choice,
on_tool_executed=on_tool_executed,
request_timeout=request_timeout,
system_prompt=system_prompt,
**kwargs
)
elif provider == "deepseek":
@@ -962,6 +983,7 @@ class LLMService:
tool_choice=tool_choice,
on_tool_executed=on_tool_executed,
request_timeout=request_timeout,
system_prompt=system_prompt,
**kwargs
)
else:

View File

@@ -1434,9 +1434,13 @@ class WorkflowEngine:
logger.debug(f"[rjb] LLM节点数据: node_id={node_id}, node_data keys={list(node_data.keys())}, api_key={'已配置' if node_data.get('api_key') else '未配置'}")
prompt = node_data.get('prompt', '')
# 如果prompt为空,使用默认提示词
# 如果 prompt 为空:不要用 {input} 展开整包 input_data。
# 预览/执行侧常带 user_id、memory、conversation_history 等大对象,模型易照抄成 ```json 回复。
if not prompt:
prompt = "请处理以下输入数据:\n{input}"
prompt = (
"请根据用户当前问题用自然语言回答;需要时可用工具。"
"不要向用户复述或输出完整的 input_data / API 请求 JSON。"
)
# 格式化prompt替换变量
try:
@@ -1804,6 +1808,15 @@ class WorkflowEngine:
# 记录实际发送给LLM的prompt
logger.info(f"[rjb] 准备调用LLM: node_id={node_id}, provider={provider}, model={model}, prompt前200字符='{prompt[:200] if len(prompt) > 200 else prompt}'")
_raw_sys = node_data.get("system_prompt")
llm_system_prompt: Optional[str] = None
if isinstance(_raw_sys, str) and _raw_sys.strip():
llm_system_prompt = _raw_sys.strip()
elif _raw_sys is not None and not isinstance(_raw_sys, (dict, list)):
_ts = str(_raw_sys).strip()
if _ts:
llm_system_prompt = _ts
# 检查是否启用工具调用
enable_tools = node_data.get('enable_tools', False)
@@ -1872,6 +1885,7 @@ class WorkflowEngine:
execution_logger=self.logger,
tool_choice=_tool_choice,
on_tool_executed=self._on_tool_executed_budget,
system_prompt=llm_system_prompt,
**_merged_tool_kw,
)
result = self._enrich_llm_json_user_profile(result, input_data)
@@ -1882,6 +1896,7 @@ class WorkflowEngine:
model=model,
temperature=temperature,
max_tokens=max_tokens,
system_prompt=llm_system_prompt,
**llm_extra_kw,
)
result = self._enrich_llm_json_user_profile(result, input_data)

View File

@@ -396,9 +396,11 @@ async function sendMessage() {
? `/api/v1/agent-chat/${currentAgentId.value}/stream`
: '/api/v1/agent-chat/bare/stream'
// 尝试 SSE 流式
// 尝试 SSE 流式(带超时控制)
let usedStreaming = false
streamingActive.value = false
const abortController = new AbortController()
const streamTimeout = setTimeout(() => abortController.abort(), 60000)
try {
const token = localStorage.getItem('token') || ''
const resp = await fetch(streamEndpoint, {
@@ -408,6 +410,7 @@ async function sendMessage() {
...(token ? { 'Authorization': `Bearer ${token}` } : {}),
},
body: JSON.stringify({ message: text, session_id: sessId || undefined }),
signal: abortController.signal,
})
if (resp.ok && resp.body) {
@@ -447,16 +450,17 @@ async function sendMessage() {
try {
const data = JSON.parse(dataStr)
// 首个事件到达 → 隐藏 loading dots
if (!receivedFirstEvent && (eventType === 'think' || eventType === 'tool_call' || eventType === 'tool_result')) {
// 首个事件到达 → 隐藏 loading dots,无论什么事件类型
if (!receivedFirstEvent) {
receivedFirstEvent = true
streamingActive.value = true
}
if (eventType === 'think') {
const thinkContent = data.content || '思考中...'
currentMsg.steps!.push({
iteration: data.iteration, type: 'think',
content: data.content || '',
content: thinkContent,
reasoning: data.reasoning,
tool_name: data.tool_names?.[0],
})
@@ -491,7 +495,10 @@ async function sendMessage() {
}
}
}
} catch { /* 流式不可用,降级到普通 POST */
} catch {
clearTimeout(streamTimeout)
// 流式失败时标记为非流式,让 fallback POST 兜底
usedStreaming = false
streamingActive.value = false
}

View File

@@ -0,0 +1,361 @@
"""创建代码编程助手:开发者工具 + Agent 配置"""
import json
import urllib.request
import urllib.parse
import uuid
BASE = "http://localhost:8037"
def req(method, path, headers=None, body=None, raw_body=None):
    """HTTP helper against the local API; returns (status, parsed_body).

    - `body` is JSON-encoded; `raw_body` (pre-encoded bytes) takes precedence.
    - Transport failures return (0, {"error": str(e)}) instead of raising.
    - Non-JSON response bodies are wrapped as {"raw": "<text>"} so a plain-text
      error page from the server cannot crash the seed script (previously a
      non-JSON HTTPError body raised inside the except handler).
    """
    hdrs = {"Content-Type": "application/json"}
    if headers:
        hdrs.update(headers)
    data = raw_body if raw_body else (json.dumps(body).encode() if body else None)
    request = urllib.request.Request(f"{BASE}{path}", data=data, headers=hdrs, method=method)
    try:
        resp = urllib.request.urlopen(request, timeout=15)
        status, payload = resp.status, resp.read()
    except urllib.request.HTTPError as e:
        # HTTPError doubles as a response object; read its body for details.
        status, payload = e.code, e.read()
    except Exception as e:  # connection refused, timeout, DNS failure, ...
        return 0, {"error": str(e)}
    try:
        return status, json.loads(payload)
    except ValueError:
        return status, {"raw": payload.decode("utf-8", errors="replace")}
# 1. Register (best-effort) and log in the seed user. Registration failure is
#    ignored: the account usually already exists on re-runs.
_, _ = req("POST", "/api/v1/auth/register", body={
    "username": "codingbot", "email": "coding@test.com", "password": "test123456"
})
# The login endpoint expects an OAuth2-style form-encoded body, not JSON.
status, login_data = req("POST", "/api/v1/auth/login",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    raw_body=urllib.parse.urlencode({"username": "codingbot", "password": "test123456"}).encode())
if status != 200:
    print(f"Login failed: {login_data}")
    exit(1)
token = login_data["access_token"]
# Bearer header reused by every subsequent API call in this script.
auth = {"Authorization": f"Bearer {token}"}
print("OK 用户已登录")
# 2. 创建开发者工具
dev_tools = [
{
"name": "execute_code",
"description": "在安全沙箱中执行Python代码返回执行结果和stdout/stderr。支持任意Python代码可用于运行脚本、测试算法、数据处理等",
"category": "开发者工具",
"implementation_type": "code",
"is_public": True,
"function_schema": {
"name": "execute_code",
"description": "执行Python代码",
"parameters": {
"type": "object",
"properties": {
"code": {"type": "string", "description": "要执行的Python代码"},
"timeout": {"type": "integer", "description": "超时秒数", "default": 10}
},
"required": ["code"]
}
},
"implementation_config": {
"source": """def run(args):
import sys, io, contextlib, json, traceback, time
code = args.get("code", "")
timeout = int(args.get("timeout", 10))
stdout_capture = io.StringIO()
stderr_capture = io.StringIO()
result = {"stdout": "", "stderr": "", "error": None}
namespace = {}
start = time.time()
try:
with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
exec(code, namespace)
result["stdout"] = stdout_capture.getvalue()
result["stderr"] = stderr_capture.getvalue()
if "__returns__" in namespace:
result["result"] = str(namespace["__returns__"])
for key in ["result", "output", "ret"]:
if key in namespace and key not in ("result",):
result["result"] = str(namespace[key])
break
except Exception as e:
result["error"] = traceback.format_exc()
result["stderr"] = stderr_capture.getvalue()
result["elapsed_ms"] = int((time.time() - start) * 1000)
return result"""
}
},
{
"name": "grep_search",
"description": "在项目文件中搜索文本,支持正则表达式和通配符过滤。类似 grep 命令",
"category": "开发者工具",
"implementation_type": "code",
"is_public": True,
"function_schema": {
"name": "grep_search",
"description": "搜索项目文件中的文本",
"parameters": {
"type": "object",
"properties": {
"pattern": {"type": "string", "description": "搜索模式(支持正则)"},
"file_pattern": {"type": "string", "description": "文件通配符过滤,如 *.py, *.ts, *.vue", "default": "*"},
"path": {"type": "string", "description": "搜索路径相对于项目根默认backend", "default": "."},
"max_results": {"type": "integer", "description": "最大结果数", "default": 20}
},
"required": ["pattern"]
}
},
"implementation_config": {
"source": """def run(args):
import os, re, fnmatch
pattern = args.get("pattern", "")
file_pattern = args.get("file_pattern", "*")
root = args.get("path", ".")
max_results = int(args.get("max_results", 20))
results = []
errors = []
try:
for dirpath, dirnames, filenames in os.walk(root):
dirnames[:] = [d for d in dirnames if not d.startswith(".") and d != "node_modules" and d != "__pycache__"]
for f in sorted(filenames):
if not fnmatch.fnmatch(f, file_pattern):
continue
fpath = os.path.join(dirpath, f)
try:
with open(fpath, "r", encoding="utf-8", errors="replace") as fh:
for lineno, line in enumerate(fh, 1):
if re.search(pattern, line):
relpath = os.path.relpath(fpath, root)
results.append(f"{relpath}:{lineno}:{line.rstrip()[:200]}")
if len(results) >= max_results:
break
except Exception as e:
errors.append(str(e))
if len(results) >= max_results:
break
if len(results) >= max_results:
break
except Exception as e:
errors.append(str(e))
return {"results": results, "count": len(results), "errors": errors[:5], "truncated": len(results) >= max_results}"""
}
},
{
"name": "list_files",
"description": "列出项目目录中的文件和子目录,支持递归和过滤",
"category": "开发者工具",
"implementation_type": "code",
"is_public": True,
"function_schema": {
"name": "list_files",
"description": "列出目录文件",
"parameters": {
"type": "object",
"properties": {
"path": {"type": "string", "description": "目录路径(相对于项目根)", "default": "."},
"recursive": {"type": "boolean", "description": "是否递归", "default": False},
"max_depth": {"type": "integer", "description": "递归最大深度", "default": 2}
},
"required": []
}
},
"implementation_config": {
"source": """def run(args):
import os
path = args.get("path", ".")
recursive = bool(args.get("recursive", False))
max_depth = int(args.get("max_depth", 2))
skip_dirs = {".git", "node_modules", "__pycache__", ".venv", ".claude", "dist", ".vite"}
skip_ext = {".pyc", ".pyo"}
def _walk(dirpath, depth=0):
items = []
try:
for name in sorted(os.listdir(dirpath)):
if name.startswith("."):
continue
full = os.path.join(dirpath, name)
rel = os.path.relpath(full, path)
is_dir = os.path.isdir(full)
if is_dir and name in skip_dirs:
continue
size = ""
if not is_dir:
try: size = os.path.getsize(full)
except: size = 0
if size and size > 1024:
size = f"{size/1024:.1f}KB"
elif size:
size = f"{size}B"
ext = os.path.splitext(name)[1]
if ext in skip_ext:
continue
items.append({
"name": rel.replace("\\\\", "/"),
"type": "dir" if is_dir else "file",
"size": size,
})
if is_dir and recursive and depth < max_depth:
items.extend(_walk(full, depth + 1))
except Exception as e:
items.append({"name": f"[error: {e}]", "type": "error"})
return items
all_items = _walk(path)
dirs = [i for i in all_items if i["type"] == "dir"]
files = [i for i in all_items if i["type"] == "file"]
return {"path": path, "directories": len(dirs), "files": len(files), "items": dirs + files}"""
}
},
{
"name": "git_log",
"description": "查看Git提交历史获取最近更改记录",
"category": "开发者工具",
"implementation_type": "code",
"is_public": True,
"function_schema": {
"name": "git_log",
"description": "查看Git提交历史",
"parameters": {
"type": "object",
"properties": {
"count": {"type": "integer", "description": "最近提交数", "default": 10},
"path": {"type": "string", "description": "查看特定文件的提交历史(可选)"}
},
"required": []
}
},
"implementation_config": {
"source": """def run(args):
import subprocess, os
count = int(args.get("count", 10))
file_path = args.get("path")
cmds = ["git", "log", f"--max-count={count}", "--pretty=format:%h|%an|%ad|%s", "--date=short"]
if file_path:
cmds.append("--")
cmds.append(file_path)
try:
result = subprocess.run(cmds, capture_output=True, text=True, timeout=15)
if result.returncode != 0:
return {"error": result.stderr[:500], "commits": []}
commits = []
for line in result.stdout.strip().split("\\n"):
if not line: continue
parts = line.split("|", 3)
if len(parts) == 4:
commits.append({"hash": parts[0], "author": parts[1], "date": parts[2], "message": parts[3]})
return {"commits": commits, "count": len(commits)}
except Exception as e:
return {"error": str(e), "commits": []}"""
}
},
]
# Create each developer tool via the API; the script is idempotent.
created = 0
failed = 0
for t in dev_tools:
    status, data = req("POST", "/api/v1/tools", headers=auth, body=t)
    if status == 201:
        print(f" OK {t['name']}")
        created += 1
    elif status == 400 and "已存在" in str(data.get("detail", "")):
        # Re-run of the seed script: an already-existing tool counts as success.
        print(f" - {t['name']} (already exists)")
        created += 1
    else:
        print(f" FAIL {t['name']}: {data.get('detail', data)}")
        failed += 1
print(f"Tools created: {created} ok, {failed} failed")
# 3. Create the coding-assistant agent (start -> llm -> end workflow).
_start_id = str(uuid.uuid4())
_llm_id = str(uuid.uuid4())
_end_id = str(uuid.uuid4())
agent_config = {
    "name": "代码编程助手",
    "description": "专业的代码编程助手,能够理解项目结构、搜索代码、执行和测试代码",
    "workflow_config": {
        "nodes": [
            {
                "id": _start_id,
                "type": "start",
                "position": {"x": 100, "y": 200},
                "data": {"label": "开始"},
            },
            {
                # The LLM node carries the whole agent persona: system prompt,
                # model selection, and the tool whitelist.
                "id": _llm_id,
                "type": "llm",
                "position": {"x": 350, "y": 200},
                "data": {
                    "label": "代码编程助手",
                    "system_prompt": (
                        "你是代码编程助手 CodeBot一个专业的软件工程AI助手。\n\n"
                        "## 核心能力\n"
                        "你擅长阅读、理解、编写和调试代码。你可以使用各种工具来帮助用户完成编程任务。\n\n"
                        "## 可用工具\n"
                        "- **file_read**: 读取项目文件\n"
                        "- **file_write**: 写入/修改文件\n"
                        "- **execute_code**: 在沙箱中执行Python代码快速验证逻辑\n"
                        "- **grep_search**: 在项目中搜索代码\n"
                        "- **list_files**: 浏览项目目录结构\n"
                        "- **git_log**: 查看Git提交历史\n"
                        "- **http_request**: 发送HTTP请求\n"
                        "- **text_analyze**: 文本分析\n"
                        "- **json_process**: JSON处理\n\n"
                        "## 工作流程\n"
                        "1. 先理解用户需求,必要时浏览项目结构了解代码组织\n"
                        "2. 搜索相关代码定位需要修改或参考的位置\n"
                        "3. 阅读相关文件完整理解上下文\n"
                        "4. 编写或修改代码\n"
                        "5. 使用 execute_code 测试代码逻辑\n"
                        "6. 向用户解释修改的内容和原因\n\n"
                        "## 回答风格\n"
                        "- 清晰、准确、有逻辑\n"
                        "- 展示代码时添加适当注释\n"
                        "- 解释代码的原理和设计思路\n"
                        "- 如果存在多种方案,对比优缺点\n"
                        "- 指出潜在的风险和注意事项\n\n"
                        "## 边界\n"
                        "若用户问「你有什么能力」「你能做什么」,只介绍与编程、软件工程及上文工具相关的能力;"
                        "不要列举写诗、泛泛日常助手等与编程无关的能力。"
                    ),
                    "model": "deepseek-v4-flash",
                    "provider": "deepseek",
                    # Low temperature for deterministic, code-oriented answers.
                    "temperature": 0.3,
                    "max_iterations": 30,
                    "tools": [
                        "file_read", "file_write", "execute_code", "grep_search",
                        "list_files", "git_log", "http_request", "text_analyze", "json_process",
                    ],
                    "memory": True,
                },
            },
            {
                "id": _end_id,
                "type": "end",
                "position": {"x": 600, "y": 200},
                "data": {"label": "结束"},
            },
        ],
        "edges": [
            {"id": str(uuid.uuid4()), "source": _start_id, "target": _llm_id},
            {"id": str(uuid.uuid4()), "source": _llm_id, "target": _end_id},
        ],
    },
    # Generous budgets: coding sessions may need many LLM/tool round-trips.
    "budget_config": {
        "max_llm_invocations": 100,
        "max_tool_calls": 200,
    },
}
status, data = req("POST", "/api/v1/agents", headers=auth, body=agent_config)
if status in (200, 201):
    agent_id = data.get("id", "")
    print(f"\nOK Agent created: {agent_id}")
else:
    # Creation failed (likely a duplicate name) — report any existing agent.
    print(f"\nFAIL Agent creation: {data}")
    # Try to update existing agent
    print("Checking existing agents...")
    s, agents = req("GET", "/api/v1/agents", headers=auth)
    if s == 200 and isinstance(agents, list):
        for a in agents:
            if a.get("name") == "代码编程助手":
                print(f" Already exists: {a.get('id')}")
print("\nDone! Go to Agent Management -> Code Programming Assistant -> Chat to start using it.")

View File

@@ -0,0 +1,174 @@
"""
测试代码编程助手 Agent — 验证流式和非流式对话是否正常工作。
"""
import json
import urllib.request
import urllib.parse
import time
import sys
BASE = "http://localhost:8037"
AGENT_ID = "010c0813-d45c-4c97-b3fc-21cedc6d4f9d"
def req(method, path, headers=None, body=None, raw_body=None, timeout=15):
    """HTTP helper against the local API; returns (status, parsed_body).

    - `body` is JSON-encoded; `raw_body` (pre-encoded bytes) takes precedence.
    - Transport failures return (0, {"error": str(e)}) instead of raising.
    - Non-JSON response bodies are wrapped as {"raw": "<text>"} so a plain-text
      error page cannot crash the test harness (previously a non-JSON
      HTTPError body raised inside the except handler).
    """
    hdrs = {"Content-Type": "application/json"}
    if headers:
        hdrs.update(headers)
    data = raw_body if raw_body else (json.dumps(body).encode() if body else None)
    request = urllib.request.Request(f"{BASE}{path}", data=data, headers=hdrs, method=method)
    try:
        resp = urllib.request.urlopen(request, timeout=timeout)
        status, payload = resp.status, resp.read()
    except urllib.request.HTTPError as e:
        # HTTPError doubles as a response object; read its body for details.
        status, payload = e.code, e.read()
    except Exception as e:  # connection refused, timeout, DNS failure, ...
        return 0, {"error": str(e)}
    try:
        return status, json.loads(payload)
    except ValueError:
        return status, {"raw": payload.decode("utf-8", errors="replace")}
def login():
    """Ensure the test user exists, log in, and return ready-to-use auth headers."""
    creds = {"username": "codingbot", "password": "test123456"}
    # Registration is best-effort: "already exists" responses are ignored.
    req("POST", "/api/v1/auth/register",
        body={**creds, "email": "coding@test.com"})
    # Login uses the OAuth2 password flow: form-encoded body, not JSON.
    status, data = req(
        "POST", "/api/v1/auth/login",
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        raw_body=urllib.parse.urlencode(creds).encode(),
    )
    if status != 200:
        print(f"[FAIL] Login: {data}")
        sys.exit(1)
    token = data["access_token"]
    print(f"[OK] Login, token: {token[:20]}...")
    return {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
def test_non_streaming(auth, message, timeout=120):
    """Exercise the non-streaming endpoint POST /api/v1/agent-chat/{agent_id}.

    Prints a one-line verdict and returns True on success, False otherwise.
    """
    payload = json.dumps({"message": message, "temperature": 0.3}).encode()
    request = urllib.request.Request(
        f"{BASE}/api/v1/agent-chat/{AGENT_ID}",
        data=payload, headers=auth, method="POST"
    )
    started = time.time()
    try:
        resp = urllib.request.urlopen(request, timeout=timeout)
        took = time.time() - started
        result = json.loads(resp.read())
        content = result.get("content", "")
        summary = (
            f"[OK] 非流式 | {took:6.1f}s | 内容={len(content)}字 | "
            f"迭代={result.get('iterations_used')} | "
            f"工具={result.get('tool_calls_made')} | "
            f"截断={result.get('truncated')}"
        )
        print(summary)
        print(f" 前100字: {content[:100]}")
        return True
    except urllib.request.HTTPError as e:
        took = time.time() - started
        print(f"[FAIL] 非流式 | {took:6.1f}s | HTTP {e.code}: {e.read().decode()[:200]}")
        return False
    except Exception as e:
        took = time.time() - started
        print(f"[FAIL] 非流式 | {took:6.1f}s | {e}")
        return False
def _parse_sse(raw_text):
    """Parse a raw SSE response body into a list of {"type", "data"} events.

    Accepts "event:"/"data:" field lines with or without a space after the
    colon; data payloads that are not valid JSON are preserved under "raw".
    """
    events = []
    for block in raw_text.split("\n\n"):
        block = block.strip()
        if not block:
            continue
        event_type = ""
        event_data = {}
        for line in block.split("\n"):
            if line.startswith("event:"):
                event_type = line[len("event:"):].strip()
            elif line.startswith("data:"):
                payload = line[len("data:"):].lstrip()
                try:
                    event_data = json.loads(payload)
                except json.JSONDecodeError:
                    event_data = {"raw": payload}
        if event_type:
            events.append({"type": event_type, "data": event_data})
    return events


def test_streaming(auth, message, timeout=120):
    """Exercise POST /api/v1/agent-chat/{agent_id}/stream.

    Validates the SSE event sequence (a "final" event must exist and be the
    last event). Returns True on success, False on any failure; validation
    uses explicit raises instead of `assert` so it still works under -O.
    """
    body = json.dumps({"message": message, "temperature": 0.3}).encode()
    r = urllib.request.Request(
        f"{BASE}/api/v1/agent-chat/{AGENT_ID}/stream",
        data=body, headers=auth, method="POST"
    )
    start = time.time()
    try:
        resp = urllib.request.urlopen(r, timeout=timeout)
        data = resp.read().decode()
        elapsed = time.time() - start
        events = _parse_sse(data)
        # Analysis: collect the event sequence and the final answer text.
        event_types = [e["type"] for e in events]
        content = ""
        for e in events:
            if e["type"] == "final":
                content = e["data"].get("content", "")
        print(f"[OK] 流式 | {elapsed:6.1f}s | {len(events)}个事件 | "
              f"内容={len(content)}")
        print(f" 事件序列: {event_types}")
        print(f" 前100字: {content[:100]}")
        # Validation failures are raised, then reported by the except below.
        if "final" not in event_types:
            raise AssertionError("缺少 final 事件")
        if events[-1]["type"] != "final":
            raise AssertionError("最后一个事件不是 final")
        print(f" 验证通过: final事件为末, 含内容")
        return True
    except urllib.request.HTTPError as e:
        elapsed = time.time() - start
        print(f"[FAIL] 流式 | {elapsed:6.1f}s | HTTP {e.code}: {e.read().decode()[:200]}")
        return False
    except Exception as e:
        elapsed = time.time() - start
        print(f"[FAIL] 流式 | {elapsed:6.1f}s | {e}")
        return False
def main():
    """Run every chat test case in both modes; exit(1) if anything failed."""
    banner = "=" * 60
    print(banner)
    print(" 代码编程助手 - Agent 对话测试")
    print(banner)
    auth = login()
    cases = [
        ("问候", "你好"),
        ("代码", "写一个Python函数判断素数"),
        ("搜索", "grep_search工具怎么用"),
        ("文件", "帮我读一下README.md的第一行"),
    ]
    passed = 0
    failed = 0
    for label, prompt in cases:
        print(f"\n--- 测试: {label} ---")
        # Each case is exercised non-streaming first, then streaming.
        for runner in (test_non_streaming, test_streaming):
            if runner(auth, prompt):
                passed += 1
            else:
                failed += 1
    print("\n" + banner)
    print(f" 结果: {passed} 通过, {failed} 失败, 共 {passed + failed} 测试")
    print(banner)
    if failed > 0:
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -19,57 +19,57 @@ function Test-PortListening([int]$Port) {
function Ensure-Redis {
if (Test-PortListening 6379) {
Write-Host "[OK] Redis already listening on 6379" -ForegroundColor Green
Write-Host '[OK] Redis already listening on 6379' -ForegroundColor Green
return
}
if (-not (Test-Path $RedisExe)) {
throw "Redis 可执行文件不存在:$RedisExe"
throw "Redis executable not found: $RedisExe"
}
Write-Host "[RUN] Starting Redis on 6379 ..." -ForegroundColor Yellow
Write-Host '[RUN] Starting Redis on 6379 ...' -ForegroundColor Yellow
Start-Process -FilePath $RedisExe -ArgumentList "--port 6379" -WorkingDirectory $RedisDir | Out-Null
Start-Sleep -Seconds 2
if (-not (Test-PortListening 6379)) {
throw "Redis 启动失败6379 未监听"
throw "Redis failed: port 6379 not listening"
}
if (Test-Path $RedisCli) {
& $RedisCli -p 6379 ping | Out-Null
}
Write-Host "[OK] Redis started" -ForegroundColor Green
Write-Host '[OK] Redis started' -ForegroundColor Green
}
function Resolve-ApiPort {
if (-not (Test-PortListening $ApiPort)) {
return $ApiPort
}
Write-Host "[WARN] Port $ApiPort is occupied, switching to $FallbackApiPort" -ForegroundColor Yellow
Write-Host ('[WARN] Port {0} is occupied, switching to {1}' -f $ApiPort, $FallbackApiPort) -ForegroundColor Yellow
if (Test-PortListening $FallbackApiPort) {
throw "端口 $ApiPort $FallbackApiPort 都被占用,请先释放端口"
throw "Ports $ApiPort and $FallbackApiPort are in use; free one first"
}
return $FallbackApiPort
}
Write-Host "== AIAgent Windows 一键启动 ==" -ForegroundColor Cyan
Write-Host '== AIAgent one-click start ==' -ForegroundColor Cyan
Write-Host "Repo: $RepoRoot"
Ensure-Redis
$RealApiPort = Resolve-ApiPort
$ApiBase = "http://127.0.0.1:$RealApiPort"
Write-Host "[RUN] Starting backend API on $RealApiPort ..." -ForegroundColor Yellow
Write-Host ('[RUN] Starting backend API on {0} ...' -f $RealApiPort) -ForegroundColor Yellow
Start-Process powershell -ArgumentList @(
"-NoExit",
"-Command",
"cd '$Backend'; .\venv\Scripts\Activate.ps1; python -m uvicorn app.main:app --host 0.0.0.0 --port $RealApiPort"
)
Write-Host "[RUN] Starting Celery worker ..." -ForegroundColor Yellow
Write-Host '[RUN] Starting Celery worker ...' -ForegroundColor Yellow
Start-Process powershell -ArgumentList @(
"-NoExit",
"-Command",
"cd '$Backend'; .\venv\Scripts\Activate.ps1; python -m celery -A app.core.celery_app worker --loglevel=info --pool=threads --concurrency=8"
)
Write-Host "[RUN] Starting frontend on $FrontendPort (proxy -> $ApiBase) ..." -ForegroundColor Yellow
Write-Host ('[RUN] Starting frontend on {0} (proxy -> {1}) ...' -f $FrontendPort, $ApiBase) -ForegroundColor Yellow
Start-Process powershell -ArgumentList @(
"-NoExit",
"-Command",
@@ -77,7 +77,7 @@ Start-Process powershell -ArgumentList @(
)
Write-Host ""
Write-Host "[DONE] 启动命令已下发" -ForegroundColor Green
Write-Host "前端: http://localhost:$FrontendPort" -ForegroundColor Cyan
Write-Host "后端: $ApiBase/docs" -ForegroundColor Cyan
Write-Host '[DONE] Start commands issued (check new PowerShell windows)' -ForegroundColor Green
Write-Host "Frontend: http://localhost:$FrontendPort" -ForegroundColor Cyan
Write-Host "API docs: $ApiBase/docs" -ForegroundColor Cyan
Write-Host "Redis: 127.0.0.1:6379" -ForegroundColor Cyan