feat: add AI学习助手 agent (KG+RAG ideal) and renshenguo feishu bot

- Add AI学习助手 agent creation script with all 39 tools, 3-layer KG+RAG memory
- Add renshenguo (人参果) feishu bot integration (app_service + ws_handler)
- Register renshenguo WS client in main.py startup
- Add RENSHENGUO_APP_ID / RENSHENGUO_APP_SECRET / RENSHENGUO_AGENT_ID config
- Reorganize docs from root into docs/ subdirectories
- Move startup scripts to scripts/startup/
- Various backend optimizations and tool improvements

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
renjianbo
2026-05-06 01:37:13 +08:00
parent f33bc461ff
commit eabf90c496
171 changed files with 4906 additions and 445 deletions

View File

@@ -0,0 +1,126 @@
#!/usr/bin/env python3
"""
查看执行日志的脚本
用于诊断数据流转问题
"""
import sys
import os
import json
from datetime import datetime
# 添加项目路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
from app.core.database import SessionLocal
from app.models.execution import Execution
from app.models.execution_log import ExecutionLog
def format_json(data):
    """Render *data* for console display.

    Dicts become pretty-printed JSON (2-space indent, non-ASCII characters
    kept as-is); any other value falls back to its ``str`` form.
    """
    if not isinstance(data, dict):
        return str(data)
    return json.dumps(data, ensure_ascii=False, indent=2)
def main():
    """Entry point: print a full diagnostic dump of the latest agent execution.

    Fetches the newest Execution row that has an agent_id, prints its input
    and output payloads, then every ExecutionLog row in chronological order,
    and finally the input/output snapshots of LLM-type nodes. Intended to be
    run manually when diagnosing data-flow problems between workflow nodes.
    """
    db = SessionLocal()
    try:
        # Latest execution triggered by an agent (rows without agent_id excluded).
        print("=" * 80)
        print("查找最近的Agent执行记录...")
        print("=" * 80)
        execution = db.query(Execution).filter(
            Execution.agent_id.isnot(None)
        ).order_by(Execution.created_at.desc()).first()
        if not execution:
            print("❌ 没有找到执行记录")
            return
        print(f"\n✅ 找到执行记录: {execution.id}")
        print(f" 状态: {execution.status}")
        print(f" 执行时间: {execution.execution_time}ms")
        print(f" 创建时间: {execution.created_at}")
        # Input payload the execution started with.
        print("\n" + "=" * 80)
        print("输入数据 (input_data):")
        print("=" * 80)
        if execution.input_data:
            print(format_json(execution.input_data))
        else:
            print("(空)")
        # Final output payload of the execution.
        print("\n" + "=" * 80)
        print("输出数据 (output_data):")
        print("=" * 80)
        if execution.output_data:
            print(format_json(execution.output_data))
        else:
            print("(空)")
        # All log rows for this execution, oldest first.
        print("\n" + "=" * 80)
        print("执行日志 (按时间顺序):")
        print("=" * 80)
        logs = db.query(ExecutionLog).filter(
            ExecutionLog.execution_id == execution.id
        ).order_by(ExecutionLog.timestamp.asc()).all()
        if not logs:
            print("❌ 没有找到执行日志")
            return
        for i, log in enumerate(logs, 1):
            print(f"\n[{i}] {log.timestamp.strftime('%Y-%m-%d %H:%M:%S')} [{log.level}]")
            print(f" 节点: {log.node_id or '(无)'} ({log.node_type or '(无)'})")
            print(f" 消息: {log.message}")
            if log.data:
                print(f" 数据:")
                data_str = format_json(log.data)
                # Truncate long payloads to the first 500 characters.
                if len(data_str) > 500:
                    print(data_str[:500] + "...")
                else:
                    print(data_str)
            if log.duration:
                print(f" 耗时: {log.duration}ms")
        # Zoom in on LLM nodes: show exactly what went in and what came out.
        print("\n" + "=" * 80)
        print("LLM节点详细分析:")
        print("=" * 80)
        llm_logs = [log for log in logs if log.node_type == 'llm']
        if llm_logs:
            for log in llm_logs:
                # "节点开始执行" rows carry the node's input snapshot.
                if log.message == "节点开始执行" and log.data:
                    print(f"\n节点 {log.node_id} 的输入数据:")
                    input_data = log.data.get('input', {})
                    print(format_json(input_data))
                # "节点执行完成" rows carry the node's output snapshot.
                if log.message == "节点执行完成" and log.data:
                    print(f"\n节点 {log.node_id} 的输出数据:")
                    output_data = log.data.get('output', {})
                    print(format_json(output_data))
        else:
            print("❌ 没有找到LLM节点的日志")
    except Exception as e:
        # Broad catch is acceptable for a one-shot diagnostic CLI.
        print(f"❌ 错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        db.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,164 @@
#!/usr/bin/env python3
"""
查看Switch节点日志的专用脚本
用于诊断Switch节点的分支过滤问题
"""
import sys
import os
import json
from datetime import datetime
# 添加项目路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
from app.core.database import SessionLocal
from app.models.execution import Execution
from app.models.execution_log import ExecutionLog
def format_json(data):
    """Convert *data* to printable text.

    Dicts are pretty-printed as JSON (UTF-8 left unescaped, 2-space
    indent); other values are stringified.
    """
    return (
        json.dumps(data, ensure_ascii=False, indent=2)
        if isinstance(data, dict)
        else str(data)
    )
def main():
    """Entry point: analyse Switch-node behaviour in the latest agent execution.

    Fetches the newest agent-triggered Execution, lists all Switch-related
    log rows, then breaks the run down into the Switch match/filter phases,
    the intent-recognition node's output, and a per-node output summary.
    Run manually to diagnose Switch branch-filtering problems.
    """
    db = SessionLocal()
    try:
        # Latest execution triggered by an agent.
        print("=" * 80)
        print("查找最近的Agent执行记录...")
        print("=" * 80)
        execution = db.query(Execution).filter(
            Execution.agent_id.isnot(None)
        ).order_by(Execution.created_at.desc()).first()
        if not execution:
            print("❌ 没有找到执行记录")
            return
        print(f"\n✅ 找到执行记录: {execution.id}")
        print(f" 状态: {execution.status}")
        print(f" 执行时间: {execution.execution_time}ms")
        print(f" 创建时间: {execution.created_at}")
        # All log rows for this execution, oldest first.
        print("\n" + "=" * 80)
        print("Switch节点相关日志:")
        print("=" * 80)
        logs = db.query(ExecutionLog).filter(
            ExecutionLog.execution_id == execution.id
        ).order_by(ExecutionLog.timestamp.asc()).all()
        if not logs:
            print("❌ 没有找到执行日志")
            return
        # Rows emitted by the switch node itself, plus any row whose message
        # mentions "Switch". (The old extra check for "[rjb] Switch" was
        # redundant: it is a substring of the "Switch" test.)
        switch_logs = [
            log for log in logs
            if log.node_type == 'switch' or 'Switch' in log.message
        ]
        if not switch_logs:
            print("❌ 没有找到Switch节点相关的日志")
            print("\n所有日志节点类型:")
            node_types = set(log.node_type for log in logs if log.node_type)
            for nt in sorted(node_types):
                print(f" - {nt}")
            return
        print(f"\n找到 {len(switch_logs)} 条Switch节点相关日志:\n")
        for i, log in enumerate(switch_logs, 1):
            # Millisecond-precision timestamp (drop the last 3 microsecond digits).
            print(f"[{i}] {log.timestamp.strftime('%H:%M:%S.%f')[:-3]} [{log.level}]")
            print(f" 节点: {log.node_id or '(无)'} ({log.node_type or '(无)'})")
            print(f" 消息: {log.message}")
            if log.data:
                print(f" 数据:")
                data_str = format_json(log.data)
                # Print the full payload, line by line.
                for line in data_str.split('\n'):
                    print(f" {line}")
            if log.duration:
                print(f" 耗时: {log.duration}ms")
            print()
        # Break down the Switch node's match and edge-filter phases.
        print("=" * 80)
        print("Switch节点执行流程分析:")
        print("=" * 80)
        match_logs = [log for log in switch_logs if '匹配' in log.message]
        filter_logs = [log for log in switch_logs if '过滤' in log.message]
        if match_logs:
            print("\n📊 匹配阶段:")
            for log in match_logs:
                if log.data:
                    data = log.data
                    print(f" 节点 {log.node_id}:")
                    print(f" 字段: {data.get('field', 'N/A')}")
                    print(f" 字段值: {data.get('field_value', 'N/A')}")
                    print(f" 匹配的分支: {data.get('matched_case', 'N/A')}")
                    print(f" 处理后的输入键: {data.get('processed_input_keys', 'N/A')}")
        if filter_logs:
            print("\n🔍 过滤阶段:")
            for log in filter_logs:
                if log.data:
                    data = log.data
                    print(f" 节点 {log.node_id}:")
                    print(f" 匹配的分支: {data.get('branch', 'N/A')}")
                    print(f" 过滤前边数: {data.get('edges_before', 'N/A')}")
                    print(f" 保留边数: {data.get('edges_kept', 'N/A')}")
                    print(f" 移除边数: {data.get('edges_removed', 'N/A')}")
        # Output of the intent-recognition node (it feeds the Switch field).
        print("\n" + "=" * 80)
        print("意图理解节点输出分析:")
        print("=" * 80)
        intent_logs = [log for log in logs if log.node_id and 'intent' in log.node_id.lower()]
        if intent_logs:
            for log in intent_logs:
                if log.message == "节点执行完成" and log.data:
                    print(f"\n节点 {log.node_id} 的输出:")
                    output = log.data.get('output', {})
                    print(format_json(output))
        else:
            print("❌ 没有找到意图理解节点的日志")
        # Per-node output summary (debug aid).
        print("\n" + "=" * 80)
        print("所有节点输出摘要:")
        print("=" * 80)
        node_outputs = {}
        for log in logs:
            if log.message == "节点执行完成" and log.node_id:
                # BUGFIX: log.data may be None (other branches guard with
                # `if log.data`); calling .get() on None raised AttributeError
                # and aborted the whole report.
                node_outputs[log.node_id] = (log.data or {}).get('output', {})
        for node_id, output in node_outputs.items():
            if isinstance(output, str) and len(output) > 100:
                print(f"{node_id}: {output[:100]}...")
            else:
                print(f"{node_id}: {output}")
    except Exception as e:
        # Broad catch: diagnostic CLI prints the traceback instead of crashing.
        print(f"❌ 错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        db.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,403 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
===========================================
📋 每日工作总结自动生成器
功能每天早上9点 → 生成今日工作总结 → 推送到指定渠道
===========================================
"""
import json
import os
import sys
import datetime
import smtplib
import requests
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from pathlib import Path
# ===========================================
# 📝 第一部分:配置区(请按需修改)
# ===========================================
class Config:
    """Central configuration — edit the values below to customise behaviour."""
    # --- Data source mode ---
    # "manual" -> type today's items interactively
    # "file"   -> read today's items from DATA_FILE
    DATA_MODE = "manual"
    # File read when DATA_MODE == "file" (one work item per line).
    DATA_FILE = "today_work_items.txt"
    # --- Push channels (any combination) ---
    # Supported: "wecom" (WeCom), "dingtalk" (DingTalk), "email"
    PUSH_CHANNELS = ["wecom"]  # e.g. ["dingtalk"] or ["email"]
    # ----- WeCom bot webhook (push is skipped while the placeholder remains) -----
    WECOM_WEBHOOK = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=YOUR_KEY_HERE"
    # ----- DingTalk bot webhook -----
    DINGTALK_WEBHOOK = "https://oapi.dingtalk.com/robot/send?access_token=YOUR_TOKEN_HERE"
    # ----- SMTP mail settings -----
    SMTP_SERVER = "smtp.qq.com"
    SMTP_PORT = 465
    SMTP_USER = "your_email@qq.com"
    SMTP_PASSWORD = "your_auth_code"  # SMTP authorisation code, NOT the login password
    MAIL_TO = ["boss@company.com", "yourself@company.com"]
    # --- Summary template (placeholders filled by build_summary_text) ---
    SUMMARY_TEMPLATE = """
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📅 今日工作总结 · {date}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
👤 姓名:{name}
🏢 部门:{department}
📌 今日工作事项:
{work_items}
📊 工作总结:
{summary}
✅ 明日计划:
{tomorrow_plan}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⏰ 生成时间:{generate_time}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
"""
# ===========================================
# 🔧 第二部分:核心功能
# ===========================================
class WorkSummaryGenerator:
    """Builds a formatted daily work summary from a list of work items.

    Items come either from interactive input or from a text file; the
    summary text is rule-based (keyword categorisation) and rendered
    through Config.SUMMARY_TEMPLATE.
    """
    def __init__(self, name="张三", department="技术部"):
        # Reporter identity shown in the rendered summary.
        self.name = name
        self.department = department
        self.today = datetime.date.today()
        self.tomorrow = self.today + datetime.timedelta(days=1)
    def get_work_items_from_input(self):
        """Source 1: read today's work items interactively (blank line ends input)."""
        print("\n📝 请输入你今天的工作事项(每行一条,输入空行结束):")
        print("完成了XX功能开发 / 修复了XXbug / 参加了XX会议\n")
        items = []
        while True:
            line = input("").strip()
            if not line:
                break
            items.append(line)
        return items
    def get_work_items_from_file(self, filepath):
        """Source 2: read today's work items from *filepath* (one per line).

        Returns [] (with a warning) when the file does not exist.
        """
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                items = [line.strip() for line in f if line.strip()]
            return items
        except FileNotFoundError:
            print(f"⚠️ 文件 {filepath} 不存在,请先创建!")
            return []
    def generate_summary(self, work_items):
        """Produce the rule-based summary text (could be swapped for an AI call).

        Categorises items by keyword, renders a category distribution,
        an itemised list, and a one-line evaluation.
        """
        if not work_items:
            return "今日暂无工作记录。"
        total_items = len(work_items)
        # Keyword-based category counters; "其他" catches unmatched items.
        categories = {
            "开发": 0, "修复": 0, "会议": 0, "文档": 0,
            "测试": 0, "沟通": 0, "其他": 0
        }
        # First matching category wins (dict order defines priority).
        keywords_map = {
            "开发": ["开发", "编码", "编程", "实现", "搭建", "构建"],
            "修复": ["修复", "解决", "处理", "修改", "bug", "Bug"],
            "会议": ["会议", "评审", "讨论", "沟通会", "晨会", "周会"],
            # fixed: "文档" was listed twice in this keyword list
            "文档": ["文档", "方案", "设计", "PPT", "报告"],
            "测试": ["测试", "调试", "自测", "联调", "验证"],
            "沟通": ["沟通", "对齐", "同步", "协调", "对接"],
        }
        for item in work_items:
            matched = False
            for cat, keywords in keywords_map.items():
                if any(kw in item for kw in keywords):
                    categories[cat] += 1
                    matched = True
                    break
            if not matched:
                categories["其他"] += 1
        summary_parts = [f"今日共完成 {total_items} 项工作,具体如下:\n"]
        # Distribution section — only categories that actually occurred.
        active_cats = {k: v for k, v in categories.items() if v > 0}
        if active_cats:
            summary_parts.append("📊 工作类型分布:")
            for cat, count in active_cats.items():
                # NOTE(review): the bar glyph is an empty string, so no bar is
                # ever drawn — looks like a lost "█" character; confirm intent.
                bar = "" * count
                summary_parts.append(f" {cat}{bar} {count}")
            summary_parts.append("")
        # Itemised list.
        summary_parts.append("📋 详细工作记录:")
        for i, item in enumerate(work_items, 1):
            summary_parts.append(f" {i}. {item}")
        # One-line evaluation based on the item count.
        summary_parts.append(f"\n💡 今日小结:今日工作效率{'较高' if total_items >= 5 else '正常'}"
                             f"建议明日继续保持{'/优化时间管理' if total_items < 3 else ''}")
        return "\n".join(summary_parts)
    def generate_tomorrow_plan(self):
        """Return a static next-day plan (could be read from a config file later)."""
        plan_items = [
            "继续推进当前开发任务",
            "同步项目进度",
            "代码审查",
        ]
        return "\n".join(f" {i+1}. {item}" for i, item in enumerate(plan_items))
    def build_summary_text(self, work_items, summary):
        """Fill Config.SUMMARY_TEMPLATE with all sections of today's report."""
        tomorrow_plan = self.generate_tomorrow_plan()
        formatted_items = "\n".join(f"{item}" for item in work_items)
        now_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # NOTE(review): "%Y年%m月%d" appears to be missing a trailing "日" —
        # left unchanged to preserve output; confirm intent.
        return Config.SUMMARY_TEMPLATE.format(
            date=self.today.strftime("%Y年%m月%d"),
            name=self.name,
            department=self.department,
            work_items=formatted_items if formatted_items else " (暂无记录)",
            summary=summary,
            tomorrow_plan=tomorrow_plan,
            generate_time=now_str
        )
    def run(self):
        """Main flow: collect items -> summarise -> save locally -> push."""
        print(f"\n{'='*50}")
        print(f"📅 今日工作总结生成器")
        print(f"📆 {self.today.strftime('%Y年%m月%d')}")
        print(f"{'='*50}\n")
        # 1. Collect today's items from the configured source.
        if Config.DATA_MODE == "file":
            items = self.get_work_items_from_file(Config.DATA_FILE)
        else:
            items = self.get_work_items_from_input()
        if not items:
            print("⚠️ 没有输入任何工作事项,将生成空总结。")
        # 2. Build the summary and render the full report.
        summary = self.generate_summary(items)
        final_text = self.build_summary_text(items, summary)
        # 3. Save a local markdown copy.
        self.save_to_file(final_text)
        # 4. Push to the configured channels.
        pusher = MessagePusher()
        pusher.push(final_text)
        return final_text
    def save_to_file(self, content):
        """Write the report to 工作总结_YYYYMMDD.md in the working directory."""
        filename = f"工作总结_{self.today.strftime('%Y%m%d')}.md"
        with open(filename, "w", encoding="utf-8") as f:
            f.write(content)
        print(f"\n✅ 已保存到本地文件:{os.path.abspath(filename)}")
# ===========================================
# 📨 第三部分:消息推送
# ===========================================
class MessagePusher:
    """Delivers a rendered summary to every channel enabled in Config.PUSH_CHANNELS."""
    def push(self, content):
        """Dispatch *content* to each configured channel in order."""
        dispatch = {
            "wecom": self.push_wecom,
            "dingtalk": self.push_dingtalk,
            "email": self.push_email,
        }
        for channel in Config.PUSH_CHANNELS:
            sender = dispatch.get(channel)
            if sender is None:
                print(f"⚠️ 未知推送渠道: {channel}")
            else:
                sender(content)
    def push_wecom(self, content):
        """Post *content* as markdown to the WeCom bot webhook."""
        url = Config.WECOM_WEBHOOK
        if "YOUR_KEY_HERE" in url:
            print("⚠️ 跳过企业微信推送未配置Webhook地址")
            return
        payload = {"msgtype": "markdown", "markdown": {"content": content}}
        try:
            result = requests.post(url, json=payload, timeout=10).json()
            if result.get("errcode") == 0:
                print("✅ 企业微信推送成功!")
            else:
                print(f"⚠️ 企业微信推送失败: {result}")
        except Exception as e:
            print(f"⚠️ 企业微信推送异常: {e}")
    def push_dingtalk(self, content):
        """Post *content* as markdown to the DingTalk bot webhook."""
        url = Config.DINGTALK_WEBHOOK
        if "YOUR_TOKEN_HERE" in url:
            print("⚠️ 跳过钉钉推送未配置Webhook地址")
            return
        payload = {
            "msgtype": "markdown",
            "markdown": {
                "title": "今日工作总结",
                "text": content,
            },
        }
        try:
            result = requests.post(url, json=payload, timeout=10).json()
            if result.get("errcode") == 0:
                print("✅ 钉钉推送成功!")
            else:
                print(f"⚠️ 钉钉推送失败: {result}")
        except Exception as e:
            print(f"⚠️ 钉钉推送异常: {e}")
    def push_email(self, content):
        """Send *content* as a plain-text e-mail via SMTP over SSL."""
        if "your_email" in Config.SMTP_USER:
            print("⚠️ 跳过邮件推送:未配置邮箱信息")
            return
        try:
            msg = MIMEMultipart()
            msg["From"] = Config.SMTP_USER
            msg["To"] = ", ".join(Config.MAIL_TO)
            msg["Subject"] = f"今日工作总结 - {datetime.date.today().strftime('%Y-%m-%d')}"
            msg.attach(MIMEText(content, "plain", "utf-8"))
            with smtplib.SMTP_SSL(Config.SMTP_SERVER, Config.SMTP_PORT) as server:
                server.login(Config.SMTP_USER, Config.SMTP_PASSWORD)
                server.sendmail(Config.SMTP_USER, Config.MAIL_TO, msg.as_string())
            print("✅ 邮件推送成功!")
        except Exception as e:
            print(f"⚠️ 邮件推送异常: {e}")
# ===========================================
# 🚀 第四部分:程序入口
# ===========================================
def setup_windows_task():
    """Generate a .bat helper that registers a daily 09:00 Windows scheduled task.

    Writes setup_windows_task.bat into the working directory; the user must
    run it manually with administrator rights.
    """
    target_script = os.path.abspath(sys.argv[0])
    interpreter = sys.executable
    batch_content = f'''@echo off
echo ========================================
echo 设置每日9点自动运行工作总结脚本
echo ========================================
REM 请以管理员身份运行此.bat文件
schtasks /create /tn "每日工作总结" /tr "{interpreter} {target_script}" /sc daily /st 09:00 /f
if %errorlevel%==0 (
echo ✅ 定时任务创建成功每天早上9:00自动运行。
) else (
echo ❌ 创建失败,请尝试以管理员身份运行。
)
pause
'''
    output_name = "setup_windows_task.bat"
    with open(output_name, "w", encoding="utf-8") as fh:
        fh.write(batch_content)
    print(f"\n📁 Windows定时任务脚本已生成{os.path.abspath(output_name)}")
    print(" → 右键点击该文件,选择「以管理员身份运行」即可设置定时任务!")
def setup_linux_cron():
    """Print step-by-step instructions for a daily 09:00 crontab entry."""
    script_path = os.path.abspath(sys.argv[0])
    interpreter = sys.executable
    banner = "=" * 50
    print("\n" + banner)
    print("🐧 Linux/macOS 定时任务设置crontab")
    print(banner)
    for line in (
        "在终端中执行以下命令添加定时任务:",
        "\n crontab -e",
        "\n然后添加一行:",
        f"\n 0 9 * * * {interpreter} {script_path}",
        "\n保存退出即可!📌",
    ):
        print(line)
if __name__ == "__main__":
    # NOTE: `sys` is already imported at module top; the redundant local
    # `import sys` that used to live here has been removed.
    # Dispatch --setup-win / --setup-linux without generating a summary.
    if len(sys.argv) > 1:
        arg = sys.argv[1].lower()
        if arg == "--setup-win":
            setup_windows_task()
            sys.exit(0)
        elif arg == "--setup-linux":
            setup_linux_cron()
            sys.exit(0)
    print("""
╔══════════════════════════════════════════╗
║ 🍊 每日工作总结生成器 ║
║ ║
║ 用法: ║
║ python daily_work_summary.py ║
║ python daily_work_summary.py --setup-win ║
║ python daily_work_summary.py --setup-linux ║
╚══════════════════════════════════════════╝
""")
    # Run the interactive generator (return value was unused, so it is
    # no longer bound to a local).
    generator = WorkSummaryGenerator(name="张三", department="技术部")
    generator.run()
    print("\n" + "="*50)
    print("🎉 工作总结生成完成!")
    print("="*50)
    # Also (re)generate the platform-appropriate scheduling helper.
    if sys.platform == "win32":
        setup_windows_task()
    else:
        setup_linux_cron()
    print("\n💡 小提示:修改 config.py 中的配置可自定义推送到不同渠道~")

View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python3
"""
调试Switch节点的详细脚本
"""
import sys
import os
import json
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
from app.core.database import SessionLocal
from app.models.execution import Execution
from app.models.agent import Agent
def main():
    """Dump the Switch-node configuration and branch outcomes of the latest run.

    Loads the newest agent-triggered Execution plus its Agent's workflow
    config, prints the Switch node's settings, its outgoing edges, its
    recorded output, and which branch targets actually executed.
    """
    db = SessionLocal()
    try:
        # Latest execution triggered by an agent.
        execution = db.query(Execution).filter(
            Execution.agent_id.isnot(None)
        ).order_by(Execution.created_at.desc()).first()
        if not execution:
            print("❌ 没有找到执行记录")
            return
        print(f"执行ID: {execution.id}")
        print(f"状态: {execution.status}")
        print()
        # The agent that owns the workflow definition.
        agent = db.query(Agent).filter(Agent.id == execution.agent_id).first()
        if not agent:
            print("❌ 没有找到Agent")
            return
        workflow_config = agent.workflow_config
        nodes = workflow_config.get('nodes', [])
        edges = workflow_config.get('edges', [])
        # First switch-type node in the workflow (assumes one matters).
        switch_node = None
        for node in nodes:
            if node.get('type') == 'switch':
                switch_node = node
                break
        if not switch_node:
            print("❌ 没有找到Switch节点")
            return
        print("=" * 80)
        print("Switch节点配置:")
        print("=" * 80)
        print(f"节点ID: {switch_node['id']}")
        print(f"字段: {switch_node['data'].get('field')}")
        print(f"Cases: {json.dumps(switch_node['data'].get('cases', {}), ensure_ascii=False, indent=2)}")
        print(f"Default: {switch_node['data'].get('default')}")
        print()
        # Outgoing edges of the switch node (one per branch handle).
        print("=" * 80)
        print("从Switch节点出发的边:")
        print("=" * 80)
        switch_edges = [e for e in edges if e.get('source') == switch_node['id']]
        for edge in switch_edges:
            print(f"边ID: {edge.get('id')}")
            print(f" sourceHandle: {edge.get('sourceHandle')}")
            print(f" target: {edge.get('target')}")
            print()
        # What the switch node itself produced during the run.
        print("=" * 80)
        print("执行结果中的节点输出:")
        print("=" * 80)
        if execution.output_data and 'node_results' in execution.output_data:
            node_results = execution.output_data['node_results']
            if switch_node['id'] in node_results:
                switch_result = node_results[switch_node['id']]
                print(f"Switch节点输出: {json.dumps(switch_result, ensure_ascii=False, indent=2)}")
            else:
                print("❌ Switch节点没有输出结果")
        else:
            print("❌ 没有找到节点执行结果")
        # Which branch targets actually ran (i.e. appear in node_results).
        print()
        print("=" * 80)
        print("执行了的分支节点:")
        print("=" * 80)
        if execution.output_data and 'node_results' in execution.output_data:
            node_results = execution.output_data['node_results']
            for edge in switch_edges:
                target_id = edge.get('target')
                if target_id in node_results:
                    print(f"✅ {target_id} (sourceHandle: {edge.get('sourceHandle')})")
                else:
                    print(f"❌ {target_id} (sourceHandle: {edge.get('sourceHandle')}) - 未执行")
    except Exception as e:
        # Diagnostic CLI: print the traceback rather than raising.
        print(f"❌ 错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        db.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env python3
"""
发布Agent脚本
"""
import requests
import sys
BASE_URL = "http://localhost:8037"
def login(username="admin", password="123456"):
    """Authenticate against the backend and return (token, auth headers).

    Returns (None, None) on any failure; errors are printed, not raised.
    """
    try:
        response = requests.post(
            f"{BASE_URL}/api/v1/auth/login",
            data={"username": username, "password": password},
        )
        if response.status_code != 200:
            print(f"❌ 登录失败: {response.status_code}")
            return None, None
        token = response.json().get("access_token")
        if not token:
            print("❌ 登录失败: 未获取到token")
            return None, None
        print(f"✅ 登录成功 (用户: {username})")
        return token, {"Authorization": f"Bearer {token}"}
    except Exception as e:
        print(f"❌ 登录异常: {str(e)}")
        return None, None
def deploy_agent(agent_id, headers):
    """POST the deploy endpoint for *agent_id*; return True on HTTP 200."""
    url = f"{BASE_URL}/api/v1/agents/{agent_id}/deploy"
    try:
        response = requests.post(url, headers=headers)
        if response.status_code != 200:
            print(f"❌ 发布失败: {response.status_code}")
            print(f"响应: {response.text}")
            return False
        agent = response.json()
        print(f"✅ Agent发布成功: {agent.get('name')} (状态: {agent.get('status')})")
        return True
    except Exception as e:
        print(f"❌ 发布异常: {str(e)}")
        return False
def find_agent_by_name(agent_name, headers):
    """Search agents by name; return the first exact-name match, else None."""
    try:
        response = requests.get(
            f"{BASE_URL}/api/v1/agents",
            headers=headers,
            params={"search": agent_name, "limit": 100},
        )
        if response.status_code != 200:
            print(f"❌ 获取Agent列表失败: {response.status_code}")
            return None
        return next(
            (a for a in response.json() if a.get("name") == agent_name),
            None,
        )
    except Exception as e:
        print(f"❌ 查找Agent异常: {str(e)}")
        return None
if __name__ == "__main__":
    # Agent name comes from argv[1]; defaults to the KB Q&A assistant.
    agent_name = sys.argv[1] if len(sys.argv) > 1 else "知识库问答助手"
    print(f"📝 发布Agent: {agent_name}\n")
    # Log in with the default admin account.
    token, headers = login()
    if not token:
        sys.exit(1)
    # Resolve the agent by exact name match.
    print(f"🔍 查找Agent: {agent_name}")
    agent = find_agent_by_name(agent_name, headers)
    if not agent:
        print(f"❌ 未找到Agent: {agent_name}")
        sys.exit(1)
    print(f"✅ 找到Agent: {agent.get('name')} (ID: {agent.get('id')}, 状态: {agent.get('status')})")
    # Deploy (publish) the agent.
    print(f"\n🚀 发布Agent...")
    if deploy_agent(agent.get('id'), headers):
        print("\n✅ 完成!")
    else:
        sys.exit(1)

View File

@@ -0,0 +1,279 @@
#!/usr/bin/env python3
"""
读取 agent_test_cases.json或 --cases 指定),批量执行 Agent 并做简单断言。
规范见:(红头)agent测试用例文档.md
"""
from __future__ import annotations
import argparse
import json
import os
import sys
import time
from typing import Any, Dict, List, Optional, Tuple
import requests
DEFAULT_CASES_FILE = "agent_test_cases.json"
def _ensure_utf8_stdio() -> None:
if sys.platform != "win32":
return
for name in ("stdout", "stderr"):
stream = getattr(sys, name, None)
if stream is not None and hasattr(stream, "reconfigure"):
try:
stream.reconfigure(encoding="utf-8", errors="replace")
except Exception:
pass
_ensure_utf8_stdio()
def _login(base_url: str, username: str, password: str, timeout: float) -> Optional[Dict[str, str]]:
    """POST the login form; return auth headers, or None (printing the reason)."""
    resp = requests.post(
        f"{base_url}/api/v1/auth/login",
        data={"username": username, "password": password},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        timeout=timeout,
    )
    if resp.status_code != 200:
        print(f"[FAIL] 登录 {resp.status_code}: {resp.text[:500]}")
        return None
    token = resp.json().get("access_token")
    if token:
        return {"Authorization": f"Bearer {token}"}
    print("[FAIL] 登录响应无 access_token")
    return None
def _resolve_agent_id(
base_url: str,
headers: Dict[str, str],
agent: Dict[str, Any],
timeout: float,
) -> Optional[str]:
if agent.get("id"):
return str(agent["id"])
name = agent.get("name")
if not name:
print("[FAIL] agent 需包含 id 或 name")
return None
r = requests.get(
f"{base_url}/api/v1/agents",
headers=headers,
params={"search": name, "limit": 100},
timeout=timeout,
)
if r.status_code != 200:
print(f"[FAIL] 查找 Agent {r.status_code}: {r.text[:500]}")
return None
agents: List[Dict[str, Any]] = r.json() or []
exact = [a for a in agents if (a.get("name") or "").strip() == name]
pick = exact[0] if exact else (agents[0] if agents else None)
if not pick:
print(f"[FAIL] 未找到 Agent: {name}")
return None
print(f"[OK] Agent: {pick.get('name')} ({pick['id']}) status={pick.get('status')}")
return str(pick["id"])
def _extract_output_text(output_data: Any) -> str:
if output_data is None:
return ""
if isinstance(output_data, str):
return output_data
if isinstance(output_data, dict):
for key in ("result", "output", "text", "content"):
v = output_data.get(key)
if v is not None:
return v if isinstance(v, str) else str(v)
return json.dumps(output_data, ensure_ascii=False)
return str(output_data)
def _poll_until_terminal(
    base_url: str,
    headers: Dict[str, str],
    execution_id: str,
    max_wait: float,
    poll_interval: float,
    timeout: float,
) -> Tuple[str, Optional[Dict[str, Any]]]:
    """Poll an execution until it reaches a terminal state or *max_wait* elapses.

    Terminal states: completed / failed / cancelled / awaiting_approval.
    Returns (last observed status, execution detail dict or None when the
    final detail fetch fails). Non-200 status polls are warned and retried.
    """
    terminal_states = ("completed", "failed", "cancelled", "awaiting_approval")
    status_url = f"{base_url}/api/v1/executions/{execution_id}/status"
    deadline = time.time() + max_wait
    last_status = "unknown"
    while time.time() < deadline:
        sr = requests.get(status_url, headers=headers, timeout=timeout)
        if sr.status_code == 200:
            last_status = str(sr.json().get("status") or "")
            if last_status in terminal_states:
                break
        else:
            print(f"[WARN] status {sr.status_code}: {sr.text[:300]}")
        time.sleep(poll_interval)
    dr = requests.get(
        f"{base_url}/api/v1/executions/{execution_id}",
        headers=headers,
        timeout=timeout,
    )
    if dr.status_code != 200:
        print(f"[FAIL] 获取执行详情 {dr.status_code}: {dr.text[:500]}")
        return last_status, None
    return last_status, dr.json()
def _check_expect(text: str, status: str, detail: Optional[Dict[str, Any]], expect: Dict[str, Any]) -> List[str]:
errors: List[str] = []
want_status = expect.get("status", "completed")
if status != want_status:
errors.append(f"状态期望 {want_status!r},实际 {status!r}")
if detail and status != "completed":
em = detail.get("error_message")
if em:
errors.append(f"error_message: {em[:500]}")
if not expect:
return errors
ci = bool(expect.get("case_insensitive"))
hay = text if not ci else text.lower()
for sub in expect.get("output_contains") or []:
s = sub if not ci else sub.lower()
if s not in hay:
errors.append(f"输出应包含 {sub!r}")
for sub in expect.get("output_not_contains") or []:
s = sub if not ci else sub.lower()
if s in hay:
errors.append(f"输出不应包含 {sub!r}")
return errors
def _run_one_case(
    base_url: str,
    headers: Dict[str, str],
    defaults: Dict[str, Any],
    case: Dict[str, Any],
) -> bool:
    """Execute a single test case end-to-end; return True when it passes.

    Resolves the target agent, creates an execution carrying the case's
    message, polls until a terminal status, then applies the case's
    `expect` checks. Per-case timing knobs fall back to the suite-level
    `defaults`.
    """
    cid = case.get("id", "(no-id)")
    title = case.get("name", "")
    print("\n" + "-" * 60)
    print(f"CASE {cid}" + (f"{title}" if title else ""))
    # Timeout precedence: case-level value overrides suite defaults.
    req_timeout = float(case.get("request_timeout_sec", defaults.get("request_timeout_sec", 120)))
    max_wait = float(case.get("max_wait_sec", defaults.get("max_wait_sec", 300)))
    poll_iv = float(case.get("poll_interval_sec", defaults.get("poll_interval_sec", 2)))
    agent_id = _resolve_agent_id(base_url, headers, case.get("agent") or {}, req_timeout)
    if not agent_id:
        return False
    message = case.get("message")
    if message is None:
        print("[FAIL] 缺少 message")
        return False
    # Expose the message under both keys that workflows commonly read;
    # input_extra may add keys but never overrides these two.
    input_data: Dict[str, Any] = {"query": message, "USER_INPUT": message}
    extra = case.get("input_extra")
    if isinstance(extra, dict):
        input_data = {**extra, **input_data}
    er = requests.post(
        f"{base_url}/api/v1/executions",
        headers=headers,
        json={"agent_id": agent_id, "input_data": input_data},
        timeout=req_timeout,
    )
    if er.status_code != 201:
        print(f"[FAIL] 创建执行 {er.status_code}: {er.text[:800]}")
        return False
    ex = er.json()
    eid = ex["id"]
    print(f"[OK] execution_id={eid}")
    st, detail = _poll_until_terminal(base_url, headers, eid, max_wait, poll_iv, req_timeout)
    text = _extract_output_text((detail or {}).get("output_data"))
    expect = case.get("expect") or {}
    errs = _check_expect(text, st, detail, expect)
    if errs:
        for e in errs:
            print(f"[FAIL] {e}")
        # Preview the output to help diagnose the failed expectation.
        if text:
            print("[OUTPUT_PREVIEW]")
            print(text[:2000] + ("" if len(text) > 2000 else ""))
        return False
    print(f"[OK] 通过 status={st}")
    if text:
        print("[OUTPUT_PREVIEW]")
        print(text[:1200] + ("" if len(text) > 1200 else ""))
    return True
def main() -> int:
    """CLI entry point: load the case file, log in, run every enabled case.

    Exit codes: 0 all passed, 1 some failed, 2 case file missing,
    3 login failed, 4 no cases defined.
    """
    ap = argparse.ArgumentParser(description="批量运行 Agent 测试用例JSON")
    ap.add_argument(
        "--cases",
        default=os.environ.get("AGENT_TEST_CASES", DEFAULT_CASES_FILE),
        help=f"用例 JSON 路径(默认 {DEFAULT_CASES_FILE}",
    )
    ap.add_argument("--username", default=None)
    ap.add_argument("--password", default=None)
    ap.add_argument("--base-url", default=None, help="覆盖 defaults.base_url / API_BASE_URL")
    args = ap.parse_args()
    path = args.cases
    if not os.path.isfile(path):
        print(f"[FAIL] 找不到用例文件: {path}")
        print("请先按 (红头)agent测试用例文档.md 创建 JSON或复制示例为 agent_test_cases.json")
        return 2
    with open(path, encoding="utf-8") as f:
        spec = json.load(f)
    defaults = spec.get("defaults") or {}
    # Base-URL precedence: CLI flag > case-file defaults > env var > localhost.
    base_url = (
        args.base_url
        or defaults.get("base_url")
        or os.environ.get("API_BASE_URL", "http://localhost:8037")
    )
    base_url = base_url.rstrip("/")
    # Credential precedence: CLI flag > case-file defaults > built-in admin.
    username = args.username or defaults.get("username", "admin")
    password = args.password or defaults.get("password", "123456")
    req_timeout = float(defaults.get("request_timeout_sec", 120))
    print(f"API: {base_url}")
    print(f"用例文件: {path}")
    headers = _login(base_url, username, password, req_timeout)
    if not headers:
        return 3
    cases: List[Dict[str, Any]] = spec.get("cases") or []
    if not cases:
        print("[FAIL] cases 为空")
        return 4
    ok, skip, fail = 0, 0, 0
    for case in cases:
        # `"enabled": false` skips a case; absent/None counts as enabled.
        if case.get("enabled") is False:
            print(f"\n[SKIP] {case.get('id', '?')}")
            skip += 1
            continue
        if _run_one_case(base_url, headers, defaults, case):
            ok += 1
        else:
            fail += 1
    print("\n" + "=" * 60)
    print(f"汇总: 通过 {ok} 失败 {fail} 跳过 {skip}")
    return 0 if fail == 0 else 1


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Open TCP ports 8037 and 8038 in whichever firewall tool is installed.
# Order of preference: firewalld -> ufw -> raw iptables. Each branch exits
# after success so only one mechanism is applied.
echo "正在检查并开放端口 8037 和 8038..."

# Method 1: firewalld (CentOS/RHEL 7+)
if command -v firewall-cmd &> /dev/null; then
    echo "检测到 firewalld使用 firewalld 开放端口..."
    sudo firewall-cmd --permanent --add-port=8037/tcp
    sudo firewall-cmd --permanent --add-port=8038/tcp
    sudo firewall-cmd --reload
    echo "✅ firewalld 端口已开放"
    exit 0
fi

# Method 2: ufw (Ubuntu/Debian)
if command -v ufw &> /dev/null; then
    echo "检测到 ufw使用 ufw 开放端口..."
    sudo ufw allow 8037/tcp
    sudo ufw allow 8038/tcp
    echo "✅ ufw 端口已开放"
    exit 0
fi

# Method 3: fall back to inserting iptables rules directly.
echo "使用 iptables 开放端口..."
sudo iptables -I INPUT -p tcp --dport 8037 -j ACCEPT
sudo iptables -I INPUT -p tcp --dport 8038 -j ACCEPT

# Persist the iptables rules (mechanism differs per distro family).
if [ -f /etc/redhat-release ]; then
    # CentOS/RHEL
    sudo service iptables save 2>/dev/null || sudo iptables-save > /etc/sysconfig/iptables
elif [ -f /etc/debian_version ]; then
    # Debian/Ubuntu
    sudo iptables-save > /etc/iptables/rules.v4 2>/dev/null || echo "请手动保存iptables规则"
fi
echo "✅ iptables 端口已开放"
echo ""
echo "⚠️ 注意:如果使用云服务器(如腾讯云、阿里云等),还需要在云控制台配置安全组规则:"
echo " - 开放入站规则TCP 8037"
echo " - 开放入站规则TCP 8038"

View File

@@ -0,0 +1,63 @@
#!/bin/bash
# Database migration: create the template-market tables.
# Pipes backend/create_template_market_tables.sql into the configured MySQL
# instance, or prints manual steps when the mysql client is unavailable.
echo "=========================================="
echo "执行数据库迁移 - 模板市场"
echo "=========================================="
echo ""

# Connection settings (mirrored from config.py).
# NOTE(review): host/port/user are hard-coded for one deployment; consider
# reading them from the environment to make the script portable.
DB_HOST="gz-cynosdbmysql-grp-d26pzce5.sql.tencentcdb.com"
DB_PORT="24936"
DB_USER="root"
DB_NAME="agent_db"
SQL_FILE="backend/create_template_market_tables.sql"

# Abort early when the SQL file is missing (must run from the repo root).
if [ ! -f "$SQL_FILE" ]; then
    echo "❌ SQL文件不存在: $SQL_FILE"
    exit 1
fi
echo "📄 SQL文件: $SQL_FILE"
echo "🔗 数据库: $DB_NAME @ $DB_HOST:$DB_PORT"
echo ""

# Without a mysql client we can only print manual instructions.
if ! command -v mysql &> /dev/null; then
    echo "⚠️ mysql命令不可用请手动执行SQL脚本"
    echo ""
    echo "手动执行步骤:"
    echo "1. 连接到数据库:"
    echo " mysql -h $DB_HOST -P $DB_PORT -u $DB_USER -p $DB_NAME"
    echo ""
    echo "2. 执行SQL"
    echo " source $(pwd)/$SQL_FILE;"
    echo ""
    exit 1
fi

# Prompt for the password without echoing it.
echo "请输入数据库密码:"
read -s DB_PASSWORD
echo ""
echo "正在执行SQL脚本..."
echo ""

# Apply the migration.
mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" < "$SQL_FILE"
if [ $? -eq 0 ]; then
    echo ""
    echo "✅ 数据库迁移完成!"
    echo ""
    # Confirm the expected tables now exist.
    echo "📊 验证表是否创建:"
    mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" -e "SHOW TABLES LIKE 'workflow_template%'; SHOW TABLES LIKE 'template_%';"
else
    echo ""
    echo "❌ 数据库迁移失败,请检查错误信息"
    exit 1
fi