# aiagent/backend/scripts/create_zhini_kefu_13.py
# (code-hosting page header — "Files / Raw / Blame / History", line count,
#  and Unicode-confusables warning — removed from the captured source)
#!/usr/bin/env python3
"""
从「知你客服12号」复制为「知你客服13号」
- **画布**:去除自环边、合并重复的 source→target 边;统一锚点 sourceHandle=right、targetHandle=left
按从「开始」出发的分层布局重排节点坐标,减少交叉与「绕圈」观感(不改变节点 id/业务配置)。
- **提示词**:在 12 号能力(http_request、file_read、file_write、system_info)基础上增加工具调用纪律
(避免同轮重复 file_write、勿刷屏 DSML)。
若已存在同名 Agent「知你客服13号」则仅更新其 workflow + 描述(不新建)。
用法:
cd backend && .\\venv\\Scripts\\python.exe scripts/create_zhini_kefu_13.py
环境变量: PLATFORM_BASE_URL, PLATFORM_USERNAME, PLATFORM_PASSWORD,
SOURCE_AGENT_NAME(默认 知你客服12号), TARGET_NAME(默认 知你客服13号)
"""
from __future__ import annotations
import copy
import json
import os
import sys
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
import requests
# Platform connection settings — all overridable via environment variables.
BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/")
USER = os.getenv("PLATFORM_USERNAME", "admin")
PWD = os.getenv("PLATFORM_PASSWORD", "123456")
# Agent to copy from, and the name of the agent to create/update.
SOURCE_NAME = os.getenv("SOURCE_AGENT_NAME", "知你客服12号")
TARGET_NAME = os.getenv("TARGET_NAME", "知你客服13号")
# Tool list written onto the "llm-unified" node of the v13 agent.
TOOLS_V13 = ["http_request", "file_read", "file_write", "system_info"]
# Appended on top of the v12 prompt (the v12 prompt body is too long to
# inline here; the script reads it from the source agent at runtime and
# concatenates — see create_zhini_kefu_12).
PROMPT_V13_EXTRA = """
【画布/执行说明13 号)】
- 工作流连线已整理为从左到右主线,减少自环与重复边带来的误解;逻辑仍以引擎与节点配置为准。
【工具调用纪律13 号)】
- 同一轮用户请求中,对 **file_write** 无特殊说明时不要重复调用多次;每个明确文件需求通常 **一次写入** 即可。
- 不要在回复正文中 **重复刷屏** DSML、`<DSML`、`invoke name=` 等标签行;工具返回后应用自然语言说明,并仍以 **单行 JSON** 收尾。
- 若上一轮已写入成功,除非用户要求修改或另存,不要再次写入相同路径。
【单行 JSON 与用户画像(与 12 号一致,勿留空)】
- 最后一行 JSON 的 user_profile 须与事实一致:用户已告知昵称时须包含 "name"(如「小七」);**禁止**用空的 user_profile 覆盖会话记忆。
- 仅靠 file_write 写入本地文件**不能**替代上述 JSON 中的 user_profile多轮称呼以 JSON + 会话记忆为准。
"""
def _sanitize_edges(edges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""去掉自环、按 (source,target) 去重,统一左右锚点。"""
seen: set = set()
out: List[Dict[str, Any]] = []
for e in edges or []:
s, t = e.get("source"), e.get("target")
if not s or not t:
continue
if s == t:
continue
key = (s, t)
if key in seen:
continue
seen.add(key)
ne = dict(e)
ne["sourceHandle"] = "right"
ne["targetHandle"] = "left"
if not ne.get("id"):
ne["id"] = f"edge_{s}_{t}"
out.append(ne)
return out
def _find_start_node_ids(nodes: List[Dict[str, Any]]) -> List[str]:
ids: List[str] = []
for n in nodes or []:
nid = n.get("id") or ""
nt = (n.get("type") or (n.get("data") or {}).get("type") or "").lower()
if nt == "start" or nid in ("start", "start-1") or str(nid).startswith("start-"):
ids.append(nid)
return ids
def _compute_ranks(
    nodes: List[Dict[str, Any]], edges: List[Dict[str, Any]]
) -> Dict[str, int]:
    """Assign a left-to-right layer index (rank) to every node id.

    Start nodes sit at rank 0; ranks then propagate along edges as the
    longest path from any start node.  The propagation loop is bounded, so
    a cycle cannot hang it.  Nodes the traversal never reaches each get a
    fresh trailing layer after the deepest ranked node.
    """
    all_ids = [node["id"] for node in nodes if node.get("id")]
    roots = _find_start_node_ids(nodes)

    # In-degree per node — used only as a fallback heuristic for roots.
    indegree: Dict[str, int] = {nid: 0 for nid in all_ids}
    for edge in edges:
        src, dst = edge.get("source"), edge.get("target")
        if src and dst and src != dst and dst in indegree:
            indegree[dst] += 1
    if not roots:
        # No explicit start node: use zero-in-degree nodes, else the first.
        roots = [nid for nid in all_ids if indegree.get(nid, 0) == 0] or all_ids[:1]

    rank: Dict[str, int] = {root: 0 for root in roots}
    budget = max(len(nodes), 8) + 5
    for _ in range(budget):
        changed = False
        for edge in edges:
            src, dst = edge.get("source"), edge.get("target")
            if not src or not dst or src == dst or src not in rank:
                continue
            candidate = rank[src] + 1
            if dst not in rank or rank[dst] < candidate:
                rank[dst] = candidate
                changed = True
        if not changed:
            break

    # Park each unreachable node on its own layer past the deepest one.
    deepest = max(rank.values(), default=0)
    for nid in all_ids:
        if nid not in rank:
            deepest += 1
            rank[nid] = deepest
    return rank
def _apply_layered_positions(nodes: List[Dict[str, Any]], ranks: Dict[str, int]) -> None:
layers: Dict[int, List[str]] = defaultdict(list)
for nid, r in ranks.items():
layers[r].append(nid)
for r in layers:
layers[r].sort()
x0, y0 = 80.0, 140.0
x_step = 300.0
y_step = 110.0
for r in sorted(layers.keys()):
ids = layers[r]
nlen = len(ids)
y_base = y0 - (nlen - 1) * y_step / 2.0
for j, nid in enumerate(ids):
for node in nodes:
if node.get("id") != nid:
continue
pos = node.setdefault("position", {})
pos["x"] = x0 + r * x_step
pos["y"] = y_base + j * y_step
break
def improve_workflow_layout_and_edges(wf: Dict[str, Any]) -> Tuple[int, int]:
    """Normalize a workflow config in place: clean edges, relayout nodes.

    Returns ``(self_loops_removed, duplicate_edges_removed)``.  The second
    count also absorbs edges dropped for missing endpoints, and is clamped
    at zero.
    """
    nodes = wf.get("nodes") or []
    original_edges = wf.get("edges") or []

    self_loops = 0
    for edge in original_edges:
        src, dst = edge.get("source"), edge.get("target")
        if src and dst and src == dst:
            self_loops += 1

    sanitized = _sanitize_edges(original_edges)
    wf["edges"] = sanitized
    duplicates = len(original_edges) - len(sanitized) - self_loops

    _apply_layered_positions(nodes, _compute_ranks(nodes, sanitized))
    return self_loops, max(0, duplicates)
def _patch_llm_unified(wf: dict, base_prompt: Optional[str] = None) -> None:
    """Patch the "llm-unified" node in place: append the v13 prompt
    addendum and pin the v13 tool list.

    ``base_prompt`` (usually the prompt read from the source agent) takes
    precedence over whatever prompt the node already stores.  Emits a
    warning on stderr when no "llm-unified" node exists.
    """
    for node in wf.get("nodes") or []:
        if node.get("id") != "llm-unified":
            continue
        data = node.setdefault("data", {})
        if base_prompt:
            prompt = base_prompt
        else:
            prompt = data.get("prompt") or ""
        # Append the addendum only once so reruns stay idempotent.
        if PROMPT_V13_EXTRA.strip() not in prompt:
            prompt = (prompt.rstrip() + "\n" + PROMPT_V13_EXTRA).strip()
        data["prompt"] = prompt
        data["enable_tools"] = True
        data["tools"] = list(TOOLS_V13)
        data["selected_tools"] = list(TOOLS_V13)
        return
    print("警告: 未找到节点 llm-unified", file=sys.stderr)
def _find_agent_id_by_name(h: Dict[str, str], name: str) -> Optional[str]:
    """Look up an agent id by exact name via the agents search endpoint.

    Returns None when the request fails (non-200) or no exact match is
    found — the server-side search may be fuzzy, so the name is re-checked
    here for equality.
    """
    resp = requests.get(
        f"{BASE}/api/v1/agents",
        params={"search": name, "limit": 50},
        headers=h,
        timeout=30,
    )
    if resp.status_code != 200:
        return None
    for agent in resp.json() or []:
        if agent.get("name") == name:
            return agent.get("id")
    return None
def main() -> int:
    """Clone (or update) the v13 agent from the v12 agent on the platform.

    Steps: log in → resolve source agent → reuse or duplicate target →
    clean up the workflow's edges/layout → patch the llm-unified node →
    persist workflow + description.  Returns a process exit code
    (0 on success, 1 on any failure).
    """
    # 1) Log in with form-encoded credentials to obtain a bearer token.
    r = requests.post(
        f"{BASE}/api/v1/auth/login",
        data={"username": USER, "password": PWD},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        timeout=15,
    )
    if r.status_code != 200:
        print("登录失败:", r.status_code, r.text[:500], file=sys.stderr)
        return 1
    token = r.json().get("access_token")
    if not token:
        print("无 access_token", file=sys.stderr)
        return 1
    h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}

    # 2) Resolve the source agent; abort if it does not exist.
    src_id = _find_agent_id_by_name(h, SOURCE_NAME)
    if not src_id:
        print(f"未找到源 Agent: {SOURCE_NAME}", file=sys.stderr)
        return 1

    # 3) Reuse the target agent when it already exists (update-only path),
    #    otherwise duplicate the source agent under the target name.
    existing_13 = _find_agent_id_by_name(h, TARGET_NAME)
    if existing_13:
        print("已存在", TARGET_NAME, "-> 仅更新工作流", existing_13)
        new_id = existing_13
        g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30)
        if g.status_code != 200:
            print("读取失败:", g.text, file=sys.stderr)
            return 1
        agent = g.json()
    else:
        dup = requests.post(
            f"{BASE}/api/v1/agents/{src_id}/duplicate",
            headers=h,
            json={"name": TARGET_NAME},
            timeout=60,
        )
        # NOTE(review): duplicate is expected to return 201 Created; the
        # response body is assumed to contain the full agent, including
        # "workflow_config" — confirm against the API.
        if dup.status_code != 201:
            print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr)
            return 1
        new_id = dup.json()["id"]
        agent = dup.json()
        print("已创建副本:", new_id, TARGET_NAME)

    # 4) Clean up edges and relayout nodes on a deep copy of the workflow
    #    so the fetched agent dict is left untouched.
    wf = copy.deepcopy(agent["workflow_config"])
    loops, dup_edges = improve_workflow_layout_and_edges(wf)
    print(f"连线整理: 去掉自环 {loops} 条, 合并重复边 {dup_edges}")

    # 5) Best effort: read the v12 prompt from the source agent's
    #    llm-unified node to use as the base prompt; fall back to the
    #    node's own prompt on any failure.
    g2 = requests.get(f"{BASE}/api/v1/agents/{src_id}", headers=h, timeout=30)
    base_prompt = None
    if g2.status_code == 200:
        try:
            for n in g2.json().get("workflow_config", {}).get("nodes") or []:
                if n.get("id") == "llm-unified":
                    base_prompt = (n.get("data") or {}).get("prompt")
                    break
        except Exception:
            pass
    _patch_llm_unified(wf, base_prompt=base_prompt)

    # 6) Persist the patched workflow and the new description.
    desc = (
        "在知你客服12号基础上整理工作流连线去自环/重复边、分层布局、统一左右锚点),"
        "并强化工具调用纪律(避免同轮重复 file_write、勿刷屏 DSML"
        "工具仍为 http_request、file_read、file_write、system_info输出单行 JSON。"
    )
    up = requests.put(
        f"{BASE}/api/v1/agents/{new_id}",
        headers=h,
        json={"description": desc, "workflow_config": wf},
        timeout=120,
    )
    if up.status_code != 200:
        print("更新失败:", up.status_code, up.text[:1200], file=sys.stderr)
        return 1
    print("已写入工具:", ", ".join(TOOLS_V13))
    print("Agent ID:", new_id)
    # Machine-readable summary line for calling scripts.
    print(json.dumps({"id": new_id, "name": TARGET_NAME}, ensure_ascii=False))
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value to the shell as the exit status.
    raise SystemExit(main())