## 安全修复 (12项) - Webhook接口添加全局Token认证,过滤敏感请求头 - 修复JWT Base64 padding公式,防止签名验证绕过 - 数据库密码/飞书Token从源码移除,改为环境变量 - 工作流引擎添加路径遍历防护 (_resolve_safe_path) - eval()添加模板长度上限检查 - 审批API添加认证依赖 - 前端v-html增强XSS转义,console.log仅开发模式输出 - 500错误不再暴露内部异常详情 ## Agent运行时修复 (7项) - 删除_inject_knowledge_context中未定义db变量的finally块 - 工具执行添加try/except保护,异常不崩溃Agent - LLM重试计入budget计数器 - self_review异常时passed=False - max_iterations截断标记success=False - 工具参数JSON解析失败时记录警告日志 - run()开始时重置_llm_invocations计数器 ## 配置与基础设施 - DEBUG默认False,SQL_ECHO独立配置项 - init_db()补全13个缺失模型导入 - 新增WEBHOOK_AUTH_TOKEN/SQL_ECHO配置项 - 新增.env.example模板文件 ## 前端修复 (12项) - 登录改用URLSearchParams替代FormData - 401拦截器通过Pinia store统一清理状态 - SSE流超时从60s延长至300s - final/error事件时清除streamTimeout - localStorage聊天记录添加24h TTL - safeParseArgCount替代模板中裸JSON.parse - fetchUser 401时同时清除user对象 ## 新增模块 - 知识进化: knowledge_extractor/retriever/tasks - 数字孪生: shadow_executor/comparison模型 - 行为采集: behavior_middleware/collector/fingerprint_engine - 代码审查: code_review_agent/document_review_agent - 反馈学习: feedback_learner - 瓶颈检测/优化引擎/成本估算/需求估算 - 速率限制器 (rate_limiter) - Alembic迁移 015-020 ## 文档 - 商业化落地计划 - 8篇docs文档 (架构/API/部署/开发/贡献等) - Docker Compose生产配置 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
"""
|
||
决策授权体系 — L0-L4 五级风险授权,数字分身根据风险等级自动判定执行权限
|
||
"""
|
||
from __future__ import annotations
|
||
|
||
import logging
|
||
from typing import Any, Dict, List, Optional
|
||
|
||
from app.services.fingerprint_engine import fingerprint_engine
|
||
from app.services.shadow_executor import shadow_executor
|
||
from app.services.behavior_collector import behavior_collector
|
||
|
||
logger = logging.getLogger(__name__)
|
||
|
||
AUTH_LEVELS = {
|
||
"L0": {
|
||
"name": "全自动执行",
|
||
"description": "低风险操作,数字分身直接执行",
|
||
"risk_range": (0, 0.2),
|
||
"requires_approval": False,
|
||
"notify": False,
|
||
"examples": ["格式化代码", "拼写检查修正", "简单文案调整"],
|
||
},
|
||
"L1": {
|
||
"name": "自动执行+通知",
|
||
"description": "低风险,自动执行后通知用户",
|
||
"risk_range": (0.2, 0.4),
|
||
"requires_approval": False,
|
||
"notify": True,
|
||
"examples": ["合并小PR", "回复常规邮件", "更新文档"],
|
||
},
|
||
"L2": {
|
||
"name": "建议+确认",
|
||
"description": "中风险,数字分身生成建议,需用户一次确认",
|
||
"risk_range": (0.4, 0.6),
|
||
"requires_approval": True,
|
||
"notify": True,
|
||
"examples": ["修改API接口", "调整配置参数", "代码重构"],
|
||
},
|
||
"L3": {
|
||
"name": "详细审批",
|
||
"description": "高风险,需要详细说明和多重审批",
|
||
"risk_range": (0.6, 0.8),
|
||
"requires_approval": True,
|
||
"notify": True,
|
||
"examples": ["修改数据库Schema", "涉及资金的变更", "权限调整"],
|
||
},
|
||
"L4": {
|
||
"name": "禁止自动",
|
||
"description": "极高风险,始终需要人工操作",
|
||
"risk_range": (0.8, 1.0),
|
||
"requires_approval": True,
|
||
"notify": True,
|
||
"examples": ["删除生产数据", "修改权限体系", "涉及合规操作"],
|
||
},
|
||
}
|
||
|
||
RISK_FACTORS = {
|
||
"data_mutation": {"weight": 0.25, "keywords": ["DELETE", "DROP", "UPDATE", "INSERT", "ALTER", "TRUNCATE", "删除", "修改", "写入", "变更"]},
|
||
"permission_change": {"weight": 0.25, "keywords": ["权限", "role", "permission", "auth", "授权", "访问控制"]},
|
||
"financial": {"weight": 0.30, "keywords": ["支付", "金额", "资金", "退款", "结算", "payment", "refund", "price", "cost", "费用"]},
|
||
"production_impact": {"weight": 0.20, "keywords": ["生产", "production", "线上", "prod", "正式环境", "发布", "deploy"]},
|
||
"user_data": {"weight": 0.15, "keywords": ["用户数据", "user.*data", "PII", "手机号", "身份证", "隐私"]},
|
||
"irreversible": {"weight": 0.20, "keywords": ["不可逆", "irreversible", "清空", "重置", "reset", "hard delete"]},
|
||
}
|
||
|
||
|
||
class DecisionAuthorizer:
    """决策授权引擎 — L0-L4 风险定级

    Scores a proposed action with keyword-based risk factors (RISK_FACTORS),
    discounts the score by the user's earned trust, and maps the result onto
    the L0-L4 authorization ladder (AUTH_LEVELS).
    """

    def evaluate(self, user_id: str, action: str, target: str = "",
                 context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """评估操作风险等级并返回授权决策。

        Args:
            user_id: 用户ID
            action: 操作描述(如 "修改数据库schema", "回复邮件")
            target: 操作目标(如具体表名、文件路径)
            context: 附加上下文信息

        Returns:
            Decision dict with raw/adjusted risk scores, the resolved level
            and its AUTH_LEVELS policy, matched risk factors, an optional
            shadow-mode comparison, and approval requirements when needed.
        """
        context = context or {}

        # 1. 计算风险分数
        risk_score, risk_factors = self._calculate_risk(action, target, context)

        # 2. 确定授权等级 (raw, before trust adjustment)
        auth_level = self._determine_level(risk_score)

        # 3. 用户信任度调整 — trusted users get a bounded score discount.
        fp = fingerprint_engine.get_fingerprint(user_id)
        trust_bonus = self._compute_trust_bonus(user_id, fp)
        adjusted_score = max(0.0, risk_score - trust_bonus)
        adjusted_level = self._determine_level(adjusted_score)

        # 4. 影子模式对比 — only triggered above trivial risk to avoid
        # needless suggestion generation.
        shadow_comparison = None
        category = context.get("category", self._infer_category(action))
        if risk_score > 0.3:
            shadow_suggestion = shadow_executor.generate_suggestion(user_id, category, {
                "action": action, "target": target, "risk_score": risk_score,
            })
            shadow_comparison = {
                "suggestion_generated": True,
                "shadow_confidence": shadow_suggestion.get("confidence", 0),
            }

        decision = {
            "action": action,
            "target": target,
            "risk_score": round(risk_score, 3),
            "adjusted_score": round(adjusted_score, 3),
            "raw_level": auth_level,
            "adjusted_level": adjusted_level,
            "trust_bonus": round(trust_bonus, 3),
            "risk_factors": risk_factors,
            "requires_approval": AUTH_LEVELS[adjusted_level]["requires_approval"],
            "notify_user": AUTH_LEVELS[adjusted_level]["notify"],
            "level_detail": AUTH_LEVELS[adjusted_level],
            "shadow_comparison": shadow_comparison,
        }

        # 如果需要审批,生成审批要求
        if decision["requires_approval"]:
            decision["approval_requirements"] = self._generate_approval_requirements(
                adjusted_level, action, risk_factors
            )

        # 记录行为 — fire-and-forget so logging never blocks the decision.
        behavior_collector.log_fire_and_forget(
            user_id=user_id,
            category="decision",
            action="authorize_action",
            context={"action": action, "target": target},
            result={"level": adjusted_level, "risk_score": adjusted_score},
        )

        return decision

    def _calculate_risk(self, action: str, target: str,
                        context: Dict[str, Any]) -> tuple:
        """计算操作风险分数和风险因子。

        Returns:
            (risk_score, matched_factors): a normalized score in [0, 1] and
            the list of factors that contributed, each with its contribution
            and severity.
        """
        text = f"{action} {target}"
        lowered = text.lower()  # hoisted: one lowercase pass instead of per-keyword
        total_score = 0.0
        max_score = 0.0
        matched_factors = []

        for factor_name, factor in RISK_FACTORS.items():
            max_score += factor["weight"]
            for keyword in factor["keywords"]:
                if keyword.lower() in lowered:
                    severity = self._factor_severity(factor_name, text)
                    contribution = factor["weight"] * severity
                    total_score += contribution
                    matched_factors.append({
                        "factor": factor_name,
                        "contribution": round(contribution, 3),
                        "severity": round(severity, 2),
                    })
                    break  # count each factor at most once

        # 归一化 — divide by the sum of all weights so the score is in [0, 1].
        if max_score > 0:
            risk_score = min(1.0, total_score / max_score)
        else:
            risk_score = 0.1  # defensive: empty RISK_FACTORS -> small baseline

        # 上下文调整 — multiplicative modifiers from the caller's context.
        if context.get("is_dry_run"):
            risk_score *= 0.3
        if context.get("has_rollback_plan"):
            risk_score *= 0.7
        if context.get("is_peak_hours"):
            risk_score *= 1.2

        return min(1.0, risk_score), matched_factors

    def _factor_severity(self, factor_name: str, text: str) -> float:
        """评估单个风险因子的严重程度。

        Fix: keyword checks here are now case-insensitive, matching the
        case-insensitive keyword matching in _calculate_risk (previously
        e.g. "Refund" triggered the financial factor but got the low 0.7
        severity instead of 1.0).
        """
        lowered = text.lower()
        if factor_name == "financial":
            # Refund-like operations move real money: maximum severity.
            if any(kw in lowered for kw in ["退款", "refund", "实际扣款"]):
                return 1.0
            return 0.7
        if factor_name == "irreversible":
            if "删除" in text or "delete" in lowered:
                return 1.0
            return 0.6
        if factor_name == "production_impact":
            if "生产" in text or "production" in lowered:
                return 0.9
            return 0.5
        # Default mid severity for factors without a dedicated heuristic.
        return 0.5

    def _determine_level(self, score: float) -> str:
        """根据分数确定授权等级。Upper bounds are inclusive (score == 0.2 -> L0)."""
        if score <= 0.2:
            return "L0"
        elif score <= 0.4:
            return "L1"
        elif score <= 0.6:
            return "L2"
        elif score <= 0.8:
            return "L3"
        else:
            return "L4"

    def _compute_trust_bonus(self, user_id: str,
                             fp: Optional[Dict[str, Any]]) -> float:
        """根据用户历史行为计算信任加成。

        Returns a discount in [0, 0.2] that is subtracted from the raw risk
        score in evaluate(). No fingerprint -> no trust.
        """
        if not fp:
            return 0.0

        total = fp.get("total_behaviors", 0)
        avg_response = fp.get("avg_response_time_ms")

        bonus = 0.0
        # 行为数据量 — more observed behavior earns more trust.
        if total > 500:
            bonus += 0.1
        elif total > 100:
            bonus += 0.05

        # 平均响应快 → 经验丰富
        if avg_response and avg_response < 30000:
            bonus += 0.05

        # 影子模式准确率(如果有)— best-effort: any failure in the accuracy
        # lookup is deliberately ignored so authorization never breaks on it.
        try:
            accuracy = shadow_executor.get_accuracy(user_id)
            avg_acc = accuracy.get("average_accuracy", 0)
            if avg_acc > 0.9:
                bonus += 0.1
            elif avg_acc > 0.8:
                bonus += 0.05
        except Exception:
            pass

        # Trust can never erase more than 0.2 of risk.
        return min(bonus, 0.2)

    def _infer_category(self, action: str) -> str:
        """从操作描述推断类别。

        Plain case-sensitive substring match; short tokens like "PR" stay
        uppercase to avoid false positives inside other words.
        """
        if any(kw in action for kw in ["代码", "code", "PR", "review"]):
            return "code_review"
        if any(kw in action for kw in ["邮件", "email", "回复", "reply"]):
            return "email"
        if any(kw in action for kw in ["文档", "document", "合同", "contract"]):
            return "document"
        return "decision"

    def _generate_approval_requirements(self, level: str, action: str,
                                        risk_factors: List[Dict[str, Any]]) -> Dict[str, Any]:
        """生成审批要求。

        Unknown levels fall back to the L2 template. L4 never auto-approves
        (auto_approve_after_hours is None). The template dicts are built
        fresh per call, so mutating the result is safe.
        """
        requirements = {
            "L2": {
                "approvers": ["直接上级或项目负责人"],
                "detail_required": ["操作摘要", "影响范围", "回滚方案"],
                "auto_approve_after_hours": 24,
            },
            "L3": {
                "approvers": ["技术负责人", "产品负责人"],
                "detail_required": ["操作摘要", "详细方案", "影响范围评估", "回滚方案", "测试结果"],
                "auto_approve_after_hours": 48,
            },
            "L4": {
                "approvers": ["技术负责人", "产品负责人", "安全负责人"],
                "detail_required": ["操作摘要", "详细方案", "影响范围评估", "风险评估报告", "回滚方案", "应急预案"],
                "auto_approve_after_hours": None,  # 永不自动批准
            },
        }

        base = requirements.get(level, requirements["L2"])
        base["risk_factors"] = [r["factor"] for r in risk_factors]
        base["action"] = action
        return base

    def get_authorization_summary(self, user_id: str, days: int = 30) -> Dict[str, Any]:
        """获取用户授权历史摘要。

        NOTE(review): `days` is echoed in the result but the underlying query
        is bounded by count (limit=100), not by a time window — confirm
        whether behavior_collector should filter by age.
        """
        behaviors = behavior_collector.get_user_behaviors(
            user_id=user_id, category="decision", limit=100
        )

        by_level = {"L0": 0, "L1": 0, "L2": 0, "L3": 0, "L4": 0}
        for b in behaviors:
            # Entries may be plain dicts or objects exposing a .result attribute;
            # anything unparseable is counted as the middle tier L2.
            result = b.get("result") if isinstance(b, dict) else (b.result or {})
            level = result.get("level", "L2") if isinstance(result, dict) else "L2"
            by_level[level] = by_level.get(level, 0) + 1

        return {
            "user_id": user_id,
            "period_days": days,
            "total_decisions": sum(by_level.values()),
            "by_level": by_level,
            "auto_executed": by_level["L0"] + by_level["L1"],
            "required_approval": by_level["L2"] + by_level["L3"] + by_level["L4"],
            "auto_rate": round((by_level["L0"] + by_level["L1"]) / max(sum(by_level.values()), 1), 3),
        }
|
||
|
||
|
||
# Module-level singleton; the engine is stateless, so sharing one instance is safe.
decision_authorizer = DecisionAuthorizer()
|