fix: #33 内置多模态工具现在在工具市场 /api/v1/tools 中可见
list_tools 端点合并内置工具(image_ocr/image_vision/speech_to_text/text_to_speech 等), 按 scope=public/all 时自动包含,无需额外种子到 DB。

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -183,6 +183,50 @@ async def orchestrate_agents(
|
||||
)
|
||||
|
||||
|
||||
class GraphOrchestrateRequest(BaseModel):
    """Graph-orchestration request: a DAG described as nodes + edges.

    Attributes:
        message: The user message fed into the orchestration run.
        nodes:   Orchestration node list (agent / condition nodes).
        edges:   Orchestration edge list wiring the nodes into a DAG;
                 defaults to an empty list (a single-node graph).
        model:   Optional LLM model override; the endpoint falls back to
                 its default model when omitted.
    """

    message: str
    nodes: List[Dict[str, Any]] = Field(..., description="编排节点列表")
    edges: List[Dict[str, Any]] = Field(default_factory=list, description="编排连线列表")
    model: Optional[str] = None
|
||||
|
||||
|
||||
@router.post("/orchestrate/graph", response_model=OrchestrateResponse)
async def orchestrate_graph(
    req: GraphOrchestrateRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Graph-orchestration mode: run agent and condition nodes in DAG topological order.

    Builds an ``AgentOrchestrator`` with the requested (or default) LLM
    model, executes the node/edge graph from the request, and maps the
    orchestrator result onto the public ``OrchestrateResponse`` schema.
    """
    # LLM-call logger bound to the current user; no single agent owns
    # this run, hence agent_id=None.
    llm_logger = _make_llm_logger(db, agent_id=None, user_id=current_user.id)

    # Fall back to the service default model when the request omits one.
    llm_config = AgentLLMConfig(
        model=req.model or "deepseek-v4-flash",
        temperature=0.3,
    )
    orchestrator = AgentOrchestrator(default_llm_config=llm_config)

    # NOTE(review): _graph is a private method of AgentOrchestrator —
    # consider exposing a public entry point on that class.
    result = await orchestrator._graph(
        req.message,
        req.nodes,
        req.edges,
        on_llm_call=llm_logger,
    )

    step_items = [
        OrchestrateStepItem(
            agent_id=step.agent_id,
            agent_name=step.agent_name,
            input=step.input,
            output=step.output,
            iterations_used=step.iterations_used,
            tool_calls_made=step.tool_calls_made,
            error=step.error,
        )
        for step in result.steps
    ]
    return OrchestrateResponse(
        mode=result.mode,
        final_answer=result.final_answer,
        steps=step_items,
        agent_results=result.agent_results,
    )
|
||||
|
||||
|
||||
@router.post("/bare", response_model=ChatResponse)
|
||||
async def chat_bare(
|
||||
req: ChatRequest,
|
||||
|
||||
Reference in New Issue
Block a user