# Source file: aiagent/backend/app/agent_runtime/orchestrator.py
# (378 lines, 13 KiB, Python)
"""
Agent Orchestrator —— Agent 编排引擎。
支持三种协作模式:
- route: Router Agent 分析问题,分发到最合适的 Specialist Agent
- sequential: Agent 流水线执行,前者输出作为后者输入
- debate: 多个 Agent 独立回答,Aggregator 汇总为最终答案
"""
from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, Field
from app.agent_runtime import (
AgentRuntime,
AgentConfig,
AgentLLMConfig,
AgentToolConfig,
AgentResult,
)
from app.agent_runtime.core import _LLMClient
logger = logging.getLogger(__name__)
class OrchestratorAgentConfig(BaseModel):
    """Configuration of a single agent taking part in an orchestration."""

    # Identity
    id: str = Field(..., description="Agent 标识")
    name: str = Field(default="Agent", description="显示名称")
    # LLM behaviour
    system_prompt: str = Field(default="你是一个有用的AI助手。")
    model: str = Field(default="deepseek-v4-flash")
    provider: str = Field(default="deepseek")
    temperature: float = Field(default=0.7)
    max_iterations: int = Field(default=10)
    # Capabilities / routing metadata
    tools: List[str] = Field(default_factory=list, description="工具白名单,空=全部")
    description: str = Field(default="", description="Agent 专长描述(路由模式用)")
class OrchestratorStep(BaseModel):
    """Execution record of one step inside an orchestration run."""

    agent_id: str                              # id of the agent that ran this step
    agent_name: str                            # display name of that agent
    input: str = Field(default="")             # step input (callers may truncate it)
    output: str = Field(default="")            # step output (callers may truncate it)
    iterations_used: int = Field(default=0)    # loop iterations the runtime consumed
    tool_calls_made: int = Field(default=0)    # number of tool invocations
    error: Optional[str] = Field(default=None) # error text when the step failed
class OrchestratorResult(BaseModel):
    """Final result of an orchestration run."""

    mode: str          # which orchestration mode produced this result
    final_answer: str  # the answer returned to the caller
    # Per-step trace of the run (router decisions, agent outputs, aggregation).
    steps: List[OrchestratorStep] = Field(default_factory=list)
    # Raw per-agent outputs: {"agent_id", "agent_name", "output"} dicts.
    agent_results: List[Dict[str, Any]] = Field(default_factory=list)
# System prompt template for the Router agent used in "route" mode.
# `{agent_list}` is filled with one "- id/name/description" line per candidate
# agent before the prompt is handed to the router runtime (see
# AgentOrchestrator._route). The router must answer with bare JSON:
# {"selected_agent": ..., "reason": ...}.
_ROUTER_SYSTEM_PROMPT = """你是一个路由调度员。你的任务是从以下 Specialist Agent 中选择一个最适合处理用户问题的 Agent。
可用的 Specialist Agent
{agent_list}
请返回 JSON 格式不要 markdown 包裹包含
1. "selected_agent": 选中的 Agent ID
2. "reason": 选择理由一句话
规则
- 选择与问题最匹配的 Agent
- 如果问题涉及多个领域选择最相关的那个
- 必须从上述列表中选择不能编造 Agent ID"""
# System prompt for the Aggregator agent used in "debate" mode: it merges the
# independent answers of all debating agents into one final answer (see
# AgentOrchestrator._debate, which only runs it when there are >= 2 answers).
_AGGREGATOR_SYSTEM_PROMPT = """你是一个回答汇总员。多个 AI Agent 对同一个问题给出了不同的回答。
请分析所有回答输出一份综合的最终答案
- 如果各 Agent 回答一致合并要点
- 如果有分歧指出不同观点并给出你的判断
- 以专业清晰的格式输出最终答案"""
class AgentOrchestrator:
    """
    Agent orchestration engine supporting three collaboration modes.

    - route:      a Router agent picks the best Specialist agent for the question
    - sequential: agents run as a pipeline, each output feeding the next input
    - debate:     all agents answer independently, an Aggregator merges the answers

    Usage:
        orch = AgentOrchestrator()
        result = await orch.run("route", question, [agent1, agent2, agent3])
    """

    def __init__(self, default_llm_config: Optional[AgentLLMConfig] = None):
        """Store the LLM config used for the built-in Router/Aggregator agents."""
        self._default_llm = default_llm_config or AgentLLMConfig(
            model="deepseek-v4-flash",
            temperature=0.3,
        )

    async def run(
        self,
        mode: str,
        question: str,
        agents: List[OrchestratorAgentConfig],
        on_llm_call: Optional[Callable[[Dict[str, Any]], Any]] = None,
    ) -> OrchestratorResult:
        """Execute a multi-agent orchestration.

        Args:
            mode: "route", "sequential" or "debate" (case-insensitive).
            question: the user question to process.
            agents: configurations of the participating agents.
            on_llm_call: optional hook forwarded to every AgentRuntime.

        Returns:
            The OrchestratorResult produced by the selected mode.

        Raises:
            ValueError: if *mode* is not one of the supported modes.
        """
        handlers = {
            "route": self._route,
            "sequential": self._sequential,
            "debate": self._debate,
        }
        handler = handlers.get(mode.lower())
        if handler is None:
            raise ValueError(f"不支持的编排模式: {mode},可选: route, sequential, debate")
        return await handler(question, agents, on_llm_call)

    @staticmethod
    def _build_runtime(
        agent_cfg: OrchestratorAgentConfig,
        on_llm_call: Optional[Callable] = None,
    ) -> AgentRuntime:
        """Build an AgentRuntime from an orchestrator agent config.

        Shared by all three modes (was previously duplicated inline in each).
        """
        return AgentRuntime(
            AgentConfig(
                name=agent_cfg.name,
                system_prompt=agent_cfg.system_prompt,
                llm=AgentLLMConfig(
                    model=agent_cfg.model,
                    provider=agent_cfg.provider,
                    temperature=agent_cfg.temperature,
                    max_iterations=agent_cfg.max_iterations,
                ),
                tools=AgentToolConfig(
                    include_tools=agent_cfg.tools,
                ),
            ),
            on_llm_call=on_llm_call,
        )

    @staticmethod
    def _parse_router_choice(
        content: str,
        agents: List[OrchestratorAgentConfig],
    ) -> str:
        """Parse the Router's reply and return the chosen agent id.

        Tries strict JSON first (tolerating a ```json or bare ``` markdown
        fence), validates the id against the candidate list, then falls back to
        scanning the raw text for any known agent id, and finally to the first
        agent in the list.
        """
        stripped = content.strip()
        # Tolerate both ```json ... ``` and bare ``` ... ``` fences.
        stripped = (
            stripped.removeprefix("```json").removeprefix("```").removesuffix("```").strip()
        )
        selected = ""
        try:
            parsed = json.loads(stripped)
            selected = parsed.get("selected_agent", "")
        except (json.JSONDecodeError, AttributeError):
            selected = ""
        if selected and any(a.id == selected for a in agents):
            return selected
        # Fallback 1: the reply mentions a known agent id verbatim.
        for a in agents:
            if a.id in content:
                return a.id
        # Fallback 2: default to the first agent.
        return agents[0].id if agents else ""

    async def _route(
        self, question: str, agents: List[OrchestratorAgentConfig],
        on_llm_call: Optional[Callable] = None,
    ) -> OrchestratorResult:
        """Route mode: Router picks one Specialist, which then answers."""
        if not agents:
            # No candidates at all: skip the (pointless) router LLM call.
            return OrchestratorResult(
                mode="route",
                final_answer="没有可用的 Specialist Agent",
                steps=[],
            )
        # Describe every candidate so the router can pick one by id.
        agent_list_str = "\n".join(
            f"- id: {a.id}, name: {a.name}, description: {a.description or a.name}"
            for a in agents
        )
        router_prompt = _ROUTER_SYSTEM_PROMPT.format(agent_list=agent_list_str)
        router_runtime = AgentRuntime(
            AgentConfig(
                name="router",
                system_prompt=router_prompt,
                llm=AgentLLMConfig(
                    model=self._default_llm.model,
                    temperature=0.1,  # low temperature for deterministic routing
                ),
                tools=AgentToolConfig(include_tools=[]),  # the router needs no tools
            ),
            on_llm_call=on_llm_call,
        )
        router_result = await router_runtime.run(question)
        if not router_result.success:
            return OrchestratorResult(
                mode="route",
                final_answer=f"路由决策失败: {router_result.content}",
                steps=[],
            )
        selected_agent_id = self._parse_router_choice(router_result.content, agents)
        # _parse_router_choice always returns a valid id for a non-empty list.
        specialist = next(a for a in agents if a.id == selected_agent_id)
        specialist_result = await self._build_runtime(specialist, on_llm_call).run(question)
        return OrchestratorResult(
            mode="route",
            final_answer=specialist_result.content,
            steps=[
                OrchestratorStep(
                    agent_id="router",
                    agent_name="Router",
                    input=question,
                    output=f"选择: {specialist.name} ({specialist.id})",
                ),
                OrchestratorStep(
                    agent_id=specialist.id,
                    agent_name=specialist.name,
                    input=question,
                    output=specialist_result.content[:300],  # truncated for display
                    iterations_used=specialist_result.iterations_used,
                    tool_calls_made=specialist_result.tool_calls_made,
                ),
            ],
            agent_results=[
                {
                    "agent_id": specialist.id,
                    "agent_name": specialist.name,
                    "output": specialist_result.content,
                },
            ],
        )

    async def _sequential(
        self, question: str, agents: List[OrchestratorAgentConfig],
        on_llm_call: Optional[Callable] = None,
    ) -> OrchestratorResult:
        """Sequential mode: each agent's output feeds the next agent's input."""
        if not agents:
            return OrchestratorResult(mode="sequential", final_answer="无 Agent 可执行")
        steps: List[OrchestratorStep] = []
        agent_results: List[Dict[str, Any]] = []
        current_input = question
        final_answer = "无输出"
        for i, agent_cfg in enumerate(agents):
            runtime = self._build_runtime(agent_cfg, on_llm_call)
            # First agent gets the raw question; later agents get the previous
            # agent's output together with the original question for context.
            if i == 0:
                agent_input = current_input
            else:
                agent_input = (
                    f"这是前一个 Agent 的处理结果,请在此基础上继续处理。\n\n"
                    f"原始问题: {question}\n\n"
                    f"前序输出:\n{current_input}"
                )
            result = await runtime.run(agent_input)
            steps.append(OrchestratorStep(
                agent_id=agent_cfg.id,
                agent_name=agent_cfg.name,
                input=agent_input[:200],      # truncated for display only
                output=result.content[:500],  # truncated for display only
                iterations_used=result.iterations_used,
                tool_calls_made=result.tool_calls_made,
                error=None if result.success else result.error,
            ))
            agent_results.append({
                "agent_id": agent_cfg.id,
                "agent_name": agent_cfg.name,
                "output": result.content,  # full output, not the truncated step text
            })
            # BUG FIX: previously final_answer was taken from steps[-1].output,
            # silently truncating the pipeline's answer to 500 characters.
            final_answer = result.content
            if not result.success:
                break
            current_input = result.content
        return OrchestratorResult(
            mode="sequential",
            final_answer=final_answer,
            steps=steps,
            agent_results=agent_results,
        )

    async def _debate(
        self, question: str, agents: List[OrchestratorAgentConfig],
        on_llm_call: Optional[Callable] = None,
    ) -> OrchestratorResult:
        """Debate mode: all agents answer independently, Aggregator merges."""
        if not agents:
            return OrchestratorResult(mode="debate", final_answer="无 Agent 可执行")
        steps: List[OrchestratorStep] = []
        agent_outputs: List[Dict[str, Any]] = []
        # Phase 1: every agent answers the same question independently.
        for agent_cfg in agents:
            result = await self._build_runtime(agent_cfg, on_llm_call).run(question)
            steps.append(OrchestratorStep(
                agent_id=agent_cfg.id,
                agent_name=agent_cfg.name,
                input=question,
                output=result.content[:500],  # truncated for display only
                iterations_used=result.iterations_used,
                tool_calls_made=result.tool_calls_made,
                error=None if result.success else result.error,
            ))
            agent_outputs.append({
                "agent_id": agent_cfg.id,
                "agent_name": agent_cfg.name,
                "output": result.content,
            })
        # Phase 2: aggregate only when there is more than one answer to merge.
        if len(agent_outputs) >= 2:
            outputs_text = "\n\n---\n\n".join(
                f"## {ao['agent_name']} 的回答\n{ao['output']}" for ao in agent_outputs
            )
            aggregator_prompt = (
                f"用户问题: {question}\n\n"
                f"以下是多个 AI Agent 对该问题的回答:\n\n{outputs_text}\n\n"
                "请综合所有回答,输出一份完整、准确的最终答案。"
            )
            aggregator_runtime = AgentRuntime(
                AgentConfig(
                    name="aggregator",
                    system_prompt=_AGGREGATOR_SYSTEM_PROMPT,
                    llm=AgentLLMConfig(
                        model=self._default_llm.model,
                        temperature=0.3,
                    ),
                    tools=AgentToolConfig(include_tools=[]),
                ),
                on_llm_call=on_llm_call,
            )
            final_result = await aggregator_runtime.run(aggregator_prompt)
            final_answer = final_result.content
            steps.append(OrchestratorStep(
                agent_id="aggregator",
                agent_name="Aggregator",
                input="汇总各 Agent 回答",
                output=final_answer[:500],
            ))
        else:
            final_answer = agent_outputs[0]["output"] if agent_outputs else "无回答"
        return OrchestratorResult(
            mode="debate",
            final_answer=final_answer,
            steps=steps,
            agent_results=agent_outputs,
        )