aiagent/backend/app/services/optimization_engine.py
renjianbo ab1589921a fix: resolve 35 security and functional defects; complete the knowledge evolution / digital twin / behavior collection modules
## Security Fixes (12 items)
- Webhook endpoints add global token authentication and filter sensitive request headers
- Fix the JWT Base64 padding formula to prevent signature-verification bypass
- Remove the database password and Feishu token from source code; load them from environment variables
- Workflow engine adds path-traversal protection (`_resolve_safe_path`; see the sketch after this list)
- eval() adds an upper-bound check on template length
- Approval API adds an authentication dependency
- Frontend v-html gains stronger XSS escaping; console.log output only in development mode
- 500 errors no longer expose internal exception details
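
A minimal sketch of the path-traversal guard, assuming `_resolve_safe_path` confines user-supplied relative paths to a fixed base directory (the signature and error handling here are illustrative, not the repo's exact code):

```python
from pathlib import Path

def _resolve_safe_path(base_dir: str, user_path: str) -> Path:
    """Resolve user_path under base_dir, rejecting traversal outside it."""
    base = Path(base_dir).resolve()
    candidate = (base / user_path).resolve()
    # resolve() collapses ".." segments; any path escaping base is rejected
    if not candidate.is_relative_to(base):
        raise ValueError(f"Path escapes workspace: {user_path}")
    return candidate
```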

## Agent Runtime Fixes (7 items)
- Remove the finally block in `_inject_knowledge_context` that referenced an undefined `db` variable
- Wrap tool execution in try/except so exceptions no longer crash the Agent (see the sketch after this list)
- LLM retries now count against the budget counter
- self_review sets passed=False when it raises an exception
- Truncation at max_iterations marks success=False
- Log a warning when tool-argument JSON parsing fails
- Reset the `_llm_invocations` counter at the start of run()
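
A minimal sketch of the tool-execution guard, assuming a `tool.run(**args)` interface and an error payload returned to the agent loop instead of a crash (the names are illustrative):

```python
import json
import logging

logger = logging.getLogger(__name__)

def execute_tool(tool, raw_args: str) -> dict:
    """Run one tool call without letting its exceptions escape the agent loop."""
    try:
        args = json.loads(raw_args) if raw_args else {}
    except json.JSONDecodeError:
        # Malformed arguments: log a warning and continue instead of crashing
        logger.warning("Tool argument JSON parse failed: %r", raw_args)
        args = {}
    try:
        return {"ok": True, "result": tool.run(**args)}
    except Exception as exc:
        # Surface the failure to the agent instead of propagating it
        logger.exception("Tool %s failed", getattr(tool, "name", tool))
        return {"ok": False, "error": str(exc)}
```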

## Configuration and Infrastructure
- DEBUG defaults to False; SQL_ECHO becomes an independent config option
- init_db() adds 13 missing model imports
- New WEBHOOK_AUTH_TOKEN and SQL_ECHO config options
- New .env.example template file (see the sketch after this list)
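
A minimal sketch of the environment-driven settings, using plain `os.getenv` (the actual project may read these through a settings library; DATABASE_URL is an assumed variable name):

```python
import os

def _env_bool(name: str, default: bool = False) -> bool:
    """Parse a boolean environment variable, falling back to the default."""
    return os.getenv(name, str(default)).strip().lower() in {"1", "true", "yes"}

DEBUG = _env_bool("DEBUG", default=False)                 # defaults to False per the fix
SQL_ECHO = _env_bool("SQL_ECHO", default=False)           # independent of DEBUG
WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN", "")  # required by webhook auth
DATABASE_URL = os.getenv("DATABASE_URL", "")              # credentials no longer in source
```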

## Frontend Fixes (12 items)
- Login uses URLSearchParams instead of FormData
- The 401 interceptor clears state uniformly through the Pinia store
- SSE stream timeout extended from 60s to 300s
- Clear streamTimeout on final/error events
- localStorage chat history gets a 24h TTL
- safeParseArgCount replaces bare JSON.parse in templates
- fetchUser also clears the user object on 401

## New Modules
- Knowledge evolution: knowledge_extractor/retriever/tasks
- Digital twin: shadow_executor and comparison models
- Behavior collection: behavior_middleware/collector/fingerprint_engine
- Code review: code_review_agent/document_review_agent
- Feedback learning: feedback_learner
- Bottleneck detection / optimization engine / cost estimation / requirement estimation
- Rate limiter (rate_limiter)
- Alembic migrations 015-020

## Documentation
- Commercialization rollout plan
- 8 docs pages (architecture / API / deployment / development / contributing, etc.)
- Docker Compose production configuration

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-05-10 19:50:20 +08:00

"""
工作流自动优化引擎 — 根据瓶颈检测结果自动生成优化方案
"""
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from app.services.bottleneck_detector import bottleneck_detector
logger = logging.getLogger(__name__)
class OptimizationEngine:
    """Workflow auto-optimization engine: produces DAG optimization plans."""

    def analyze_and_optimize(self, hours: int = 24) -> Dict[str, Any]:
        """Run the full analysis and generate an optimization plan."""
        analysis = bottleneck_detector.run_full_analysis(hours=hours)
        bottlenecks = analysis.get("bottlenecks", [])
        optimizations = self.generate_optimizations(bottlenecks)
        dag_changes = self.generate_dag_changes(optimizations)
        return {
            "period_hours": hours,
            "summary": analysis,
            "optimizations": optimizations,
            "dag_changes": dag_changes,
            "requires_approval": len(optimizations) > 0,
        }
    def generate_optimizations(self, bottlenecks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Generate concrete optimization plans from detected bottlenecks."""
        optimizations = []
        for b in bottlenecks:
            severity = b.get("severity", "low")
            if severity == "low":
                continue
            opt = {
                "node_type": b["node_type"],
                "severity": severity,
                "current_metrics": {
                    "p95_ms": b.get("p95_ms"),
                    "error_rate": b.get("error_rate"),
                    "p50_ms": b.get("p50_ms"),
                },
                "changes": [],
            }
            if b.get("is_bottleneck"):
                opt["changes"].append({
                    "type": "parallelize",
                    "description": f"Split {b['node_type']} into parallel sub-nodes",
                    "estimated_improvement": "50-70% latency reduction",
                    "implementation": {
                        "action": "replace_node",
                        "new_structure": "parallel_gateway",
                        "sub_nodes": self._suggest_split(b["node_type"]),
                    },
                })
            if b.get("is_problematic"):
                opt["changes"].append({
                    "type": "add_validation",
                    "description": f"Add a pre-validation node; current failure rate {b['error_rate']:.1%}",
                    "estimated_improvement": "Failure rate below 5%",
                    "implementation": {
                        "action": "insert_before",
                        "new_node": {
                            "type": "validate",
                            "name": f"PreValidate_{b['node_type']}",
                            "config": {"required_fields": [], "timeout_ms": 5000},
                        },
                    },
                })
                opt["changes"].append({
                    "type": "add_retry",
                    "description": "Add retry logic: up to 3 attempts with exponential backoff",
                    "estimated_improvement": "Automatic recovery from intermittent failures",
                    "implementation": {
                        "action": "update_config",
                        "config": {"retry_count": 3, "retry_delay_ms": 1000, "backoff": "exponential"},
                    },
                })
            if b.get("is_inefficient"):
                opt["changes"].append({
                    "type": "add_cache",
                    "description": "Add a result cache (TTL: 300s)",
                    "estimated_improvement": "30-60% fewer repeated calls",
                    "implementation": {
                        "action": "insert_before",
                        "new_node": {
                            "type": "cache_check",
                            "name": f"Cache_{b['node_type']}",
                            "config": {"ttl_seconds": 300, "key_fields": []},
                        },
                    },
                })
            if opt["changes"]:
                optimizations.append(opt)
        return optimizations
    def _suggest_split(self, node_type: str) -> List[Dict[str, Any]]:
        """Suggest a split plan based on the node type."""
        suggestions = {
            "llm_call": [
                {"name": "PromptPreprocess", "type": "transform"},
                {"name": "LLMCall_Fast", "type": "llm_call", "config": {"model": "fast"}},
                {"name": "LLMCall_Accurate", "type": "llm_call", "config": {"model": "accurate"}},
                {"name": "ResultMerge", "type": "merge"},
            ],
            "api_request": [
                {"name": "RequestPrepare", "type": "transform"},
                {"name": "APICall_Core", "type": "api_request"},
                {"name": "ResponseParse", "type": "transform"},
            ],
            "data_query": [
                {"name": "QueryBuilder", "type": "transform"},
                {"name": "DBQuery", "type": "data_query"},
                {"name": "ResultFormat", "type": "transform"},
            ],
            "code_execution": [
                {"name": "SandboxSetup", "type": "setup"},
                {"name": "CodeRun", "type": "code_execution"},
                {"name": "OutputParse", "type": "transform"},
            ],
        }
        # Fallback for unknown node types: a generic three-stage pipeline
        return suggestions.get(node_type, [
            {"name": "PreProcess", "type": "transform"},
            {"name": "CoreExecute", "type": "execute"},
            {"name": "PostProcess", "type": "transform"},
        ])
    def generate_dag_changes(self, optimizations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Flatten optimization plans into a list of DAG changes (usable for diff display)."""
        changes = []
        for opt in optimizations:
            for change in opt.get("changes", []):
                impl = change.get("implementation", {})
                changes.append({
                    "node_type": opt["node_type"],
                    "severity": opt["severity"],
                    "action": impl.get("action"),
                    "description": change["description"],
                    "estimated_improvement": change.get("estimated_improvement"),
                    "detail": impl,
                })
        return changes
    def apply_optimization(self, workflow_id: str, optimization_id: str,
                           approved_changes: List[str]) -> Dict[str, Any]:
        """Apply the user-approved optimization changes (creates a new version)."""
        # Integrates with the workflow_version system to create the optimized
        # version; for now this returns a skeleton result, with the actual
        # integration to follow.
        return {
            "workflow_id": workflow_id,
            "status": "pending",
            "message": f"Optimization plan recorded; {len(approved_changes)} changes pending application",
            "applied_changes": approved_changes,
        }


optimization_engine = OptimizationEngine()
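

# Usage sketch (illustrative, not part of the committed file): run an analysis
# and print the proposed DAG changes for review.
if __name__ == "__main__":
    plan = optimization_engine.analyze_and_optimize(hours=24)
    for change in plan["dag_changes"]:
        print(f"[{change['severity']}] {change['node_type']}: {change['description']}")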