From bd3f8be781930a4e98491a0609c6ba7be64e61f6 Mon Sep 17 00:00:00 2001 From: renjianbo <18691577328@163.com> Date: Wed, 8 Apr 2026 11:44:24 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20=E5=B7=A5=E4=BD=9C=E6=B5=81=E8=AE=B0?= =?UTF-8?q?=E5=BF=86=E4=B8=8E=E5=86=85=E7=BD=AE=E5=B7=A5=E5=85=B7=E3=80=81?= =?UTF-8?q?=E7=9F=A5=E4=BD=A0=E5=AE=A2=E6=9C=8D=E8=84=9A=E6=9C=AC=E3=80=81?= =?UTF-8?q?Agent=E7=AE=A1=E7=90=86=E6=8A=80=E8=83=BD=E5=B1=95=E7=A4=BA?= =?UTF-8?q?=E4=B8=8E=E8=83=BD=E5=8A=9B=E9=85=8D=E7=BD=AE=E3=80=81=E6=96=87?= =?UTF-8?q?=E6=A1=A3=E4=B8=8EWindows=E5=90=AF=E5=8A=A8=E8=84=9A=E6=9C=AC?= =?UTF-8?q?=EF=BC=9B=E5=BF=BD=E7=95=A5=20redis=5Ftemp=20=E4=BA=8C=E8=BF=9B?= =?UTF-8?q?=E5=88=B6=E7=9B=AE=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made-with: Cursor --- .gitignore | 3 + 123.md | 2 + 456.md | 1 + Windows启动指南.md | 317 ++ agent记忆实现方案.md | 85 + backend/alembic/versions/003_add_rbac.py | 2 +- .../versions/005_persistent_user_memory.py | 49 + backend/app/core/config.py | 16 +- backend/app/core/tools_bootstrap.py | 72 + backend/app/main.py | 70 +- backend/app/models/__init__.py | 3 +- backend/app/models/persistent_user_memory.py | 34 + backend/app/services/builtin_tools.py | 153 +- backend/app/services/llm_service.py | 514 ++- .../app/services/persistent_memory_service.py | 127 + backend/app/services/tool_registry.py | 12 +- backend/app/services/workflow_engine.py | 1085 +++++- backend/app/tasks/agent_tasks.py | 4 + backend/app/tasks/workflow_tasks.py | 20 +- backend/env.example | 10 +- backend/scripts/create_zhini_kefu_10.py | 269 ++ backend/scripts/create_zhini_kefu_11.py | 122 + backend/scripts/create_zhini_kefu_12.py | 132 + backend/scripts/create_zhini_kefu_13.py | 280 ++ backend/scripts/create_zhini_kefu_14.py | 297 ++ backend/scripts/create_zhini_kefu_7.py | 106 + backend/scripts/create_zhini_kefu_8.py | 125 + backend/scripts/create_zhini_kefu_9.py | 243 ++ backend/scripts/debug_cache_query.py | 37 + 
backend/scripts/e2e_zhini11_test.py | 153 + backend/scripts/e2e_zhini12_123_md.py | 99 + backend/scripts/e2e_zhini12_bbb_md.py | 184 ++ backend/scripts/e2e_zhini12_bbbb_md.py | 239 ++ backend/scripts/e2e_zhini12_file_test.py | 142 + backend/scripts/e2e_zhini7_two_rounds.py | 171 + backend/scripts/e2e_zhini9_test.py | 116 + ...patch_code_build_memory_value_no_import.py | 57 + .../patch_zhini_code_build_context_scope.py | 51 + .../patch_zhini_kefu_12_prompt_tools.py | 91 + backend/scripts/restart_api_worker.ps1 | 41 + .../scripts/test_write_user_data_aaa_md.py | 61 + backend/scripts/update_zhini7_prompt.py | 25 + frontend/pnpm-lock.yaml | 2936 +++++++++++++++++ frontend/src/components/AgentChatPreview.vue | 37 +- .../WorkflowEditor/WorkflowEditor.vue | 356 +- frontend/src/utils/agentSkills.ts | 76 + frontend/src/views/Agents.vue | 222 +- frontend/vite.config.ts | 2 +- start_windows.cmd | 219 ++ start_windows.ps1 | 239 ++ test_zhini_kefu_6.py | 134 + user_data/.gitkeep | 0 user_data/aaa.md | 3 + user_data/abb.md | 1 + user_data/abc.md | 1 + user_data/bbb.md | 1 + user_data/bbbb.md | 1 + user_data/ccc.md | 1 + user_data/xxx.md | 1 + user_profile.json | 1 + 上传git仓.md | 2 + 望庐山瀑布.md | 30 + 知你客服14号能力文档.md | 68 + 知你客服能力的集成和扩展方案.md | 156 + 静夜思.md | 4 + 项目核心文档汇总.md | 462 +++ 66 files changed, 10104 insertions(+), 469 deletions(-) create mode 100644 123.md create mode 100644 456.md create mode 100644 Windows启动指南.md create mode 100644 agent记忆实现方案.md create mode 100644 backend/alembic/versions/005_persistent_user_memory.py create mode 100644 backend/app/core/tools_bootstrap.py create mode 100644 backend/app/models/persistent_user_memory.py create mode 100644 backend/app/services/persistent_memory_service.py create mode 100644 backend/scripts/create_zhini_kefu_10.py create mode 100644 backend/scripts/create_zhini_kefu_11.py create mode 100644 backend/scripts/create_zhini_kefu_12.py create mode 100644 backend/scripts/create_zhini_kefu_13.py create mode 100644 
backend/scripts/create_zhini_kefu_14.py create mode 100644 backend/scripts/create_zhini_kefu_7.py create mode 100644 backend/scripts/create_zhini_kefu_8.py create mode 100644 backend/scripts/create_zhini_kefu_9.py create mode 100644 backend/scripts/debug_cache_query.py create mode 100644 backend/scripts/e2e_zhini11_test.py create mode 100644 backend/scripts/e2e_zhini12_123_md.py create mode 100644 backend/scripts/e2e_zhini12_bbb_md.py create mode 100644 backend/scripts/e2e_zhini12_bbbb_md.py create mode 100644 backend/scripts/e2e_zhini12_file_test.py create mode 100644 backend/scripts/e2e_zhini7_two_rounds.py create mode 100644 backend/scripts/e2e_zhini9_test.py create mode 100644 backend/scripts/patch_code_build_memory_value_no_import.py create mode 100644 backend/scripts/patch_zhini_code_build_context_scope.py create mode 100644 backend/scripts/patch_zhini_kefu_12_prompt_tools.py create mode 100644 backend/scripts/restart_api_worker.ps1 create mode 100644 backend/scripts/test_write_user_data_aaa_md.py create mode 100644 backend/scripts/update_zhini7_prompt.py create mode 100644 frontend/pnpm-lock.yaml create mode 100644 frontend/src/utils/agentSkills.ts create mode 100644 start_windows.cmd create mode 100644 start_windows.ps1 create mode 100644 test_zhini_kefu_6.py create mode 100644 user_data/.gitkeep create mode 100644 user_data/aaa.md create mode 100644 user_data/abb.md create mode 100644 user_data/abc.md create mode 100644 user_data/bbb.md create mode 100644 user_data/bbbb.md create mode 100644 user_data/ccc.md create mode 100644 user_data/xxx.md create mode 100644 user_profile.json create mode 100644 上传git仓.md create mode 100644 望庐山瀑布.md create mode 100644 知你客服14号能力文档.md create mode 100644 知你客服能力的集成和扩展方案.md create mode 100644 静夜思.md create mode 100644 项目核心文档汇总.md diff --git a/.gitignore b/.gitignore index 6419c6f..17b28e5 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ logs/ # 操作系统 .DS_Store Thumbs.db + +# 本地 Redis Windows 分发(勿提交二进制) +redis_temp/ 
diff --git a/123.md b/123.md new file mode 100644 index 0000000..6172d80 --- /dev/null +++ b/123.md @@ -0,0 +1,2 @@ +# 123 +e2e zhini12 123.md marker diff --git a/456.md b/456.md new file mode 100644 index 0000000..0868514 --- /dev/null +++ b/456.md @@ -0,0 +1 @@ +# 456.md 文件已创建 \ No newline at end of file diff --git a/Windows启动指南.md b/Windows启动指南.md new file mode 100644 index 0000000..89df55f --- /dev/null +++ b/Windows启动指南.md @@ -0,0 +1,317 @@ +# Windows 本地启动指南 + +## 前置要求 + +### 已安装的软件 +- ✅ Python 3.12.7(已安装) +- ✅ Node.js 22.13.0(已安装) +- ✅ npm 10.9.2(已安装) +- ✅ pnpm 10.33.0(已安装) + +### 需要安装的软件 +- ❌ Redis(需要安装) + +## 步骤 1:安装 Redis(选择一种方式) + +### 选项 A:使用 Docker 运行 Redis(推荐,最简单) + +如果你不介意使用 Docker 来运行 Redis(其他服务仍在本地运行): + +1. **安装 Docker Desktop** + - 下载地址:https://www.docker.com/products/docker-desktop/ + - 安装后启动 Docker Desktop + +2. **启动 Redis 容器** + ```bash + docker run -d --name redis -p 6380:6379 redis:7-alpine + ``` + +3. **验证 Redis 是否运行** + ```bash + docker ps + ``` + 应该能看到 Redis 容器正在运行。 + +### 选项 B:安装 Redis for Windows + +1. **下载 Redis Windows 版本** + - 从 GitHub 下载:https://github.com/microsoftarchive/redis/releases + - 下载 `Redis-x64-3.2.100.msi` + +2. **安装 Redis** + - 运行安装程序,按照默认设置安装 + - 安装完成后,Redis 会作为 Windows 服务运行 + +3. **修改 Redis 端口(可选)** + - 默认 Redis 运行在 6379 端口 + - 如果需要使用 6380 端口(与 docker-compose 配置一致),需要修改配置文件 + - 配置文件位置:`C:\Program Files\Redis\redis.windows-service.conf` + - 找到 `port 6379` 改为 `port 6380` + - 重启 Redis 服务 + +### 选项 C:使用 WSL 安装 Redis + +如果你有 WSL(Windows Subsystem for Linux): + +```bash +# 在 WSL 中运行 +sudo apt update +sudo apt install redis-server +sudo service redis-server start +# 需要配置 Redis 允许远程连接 +``` + +## 步骤 2:配置后端环境 + +### 1. 创建虚拟环境并安装依赖 + +```bash +cd backend + +# 创建虚拟环境 +python -m venv venv + +# 激活虚拟环境 +# Windows CMD: +venv\Scripts\activate +# Windows PowerShell: +.\venv\Scripts\Activate.ps1 +# Git Bash: +source venv/Scripts/activate + +# 安装依赖 +pip install -r requirements.txt +``` + +### 2. 
配置环境变量 + +```bash +# 复制环境变量文件 +copy env.example .env +``` + +编辑 `.env` 文件,确保以下配置正确: + +```ini +# 数据库配置(已配置为腾讯云MySQL,无需修改) +DATABASE_URL=mysql+pymysql://root:!Rjb12191@gz-cynosdbmysql-grp-d26pzce5.sql.tencentcdb.com:24936/agent_db?charset=utf8mb4 + +# Redis配置(根据你的Redis安装方式选择) +# 如果使用Docker Redis(端口6380): +REDIS_URL=redis://localhost:6380/0 +# 如果使用Windows Redis(默认端口6379): +# REDIS_URL=redis://localhost:6379/0 + +# CORS配置 +CORS_ORIGINS=http://localhost:3000,http://127.0.0.1:3000,http://localhost:8038,http://101.43.95.130:8038 + +# DeepSeek API密钥(已有) +DEEPSEEK_API_KEY=sk-fdf7cc1c73504e628ec0119b7e11b8cc +DEEPSEEK_BASE_URL=https://api.deepseek.com +``` + +### 3. 运行数据库迁移 + +```bash +# 确保虚拟环境已激活 +alembic upgrade head +``` + +### 4. 启动后端服务 + +```bash +uvicorn app.main:app --host 0.0.0.0 --port 8037 --reload +``` + +后端服务将在 http://localhost:8037 启动。 + +### 5. 启动 Celery Worker(新终端) + +```bash +# 在新终端中,进入backend目录并激活虚拟环境 +cd backend +venv\Scripts\activate + +# 启动 Celery Worker +celery -A app.core.celery_app worker --loglevel=info +``` + +## 步骤 3:配置前端环境 + +### 1. 安装依赖 + +```bash +cd frontend +pnpm install +``` + +### 2. 配置 API 地址 + +编辑 `frontend/vite.config.ts`,确保代理配置正确: + +```typescript +export default defineConfig({ + server: { + port: 3000, + proxy: { + '/api': { + target: 'http://localhost:8037', + changeOrigin: true, + }, + '/ws': { + target: 'ws://localhost:8037', + ws: true, + }, + }, + }, +}) +``` + +### 3. 启动前端服务 + +```bash +pnpm dev +``` + +前端服务将在 http://localhost:3000 启动。 +注意:访问地址是 http://localhost:3000,不是 8038(8038 是 Docker 映射端口)。 + +## 步骤 4:验证服务 + +### 1. 检查服务状态 + +- **后端API**: http://localhost:8037 +- **API文档**: http://localhost:8037/docs +- **前端**: http://localhost:3000 + +### 2. 测试健康检查 + +```bash +curl http://localhost:8037/health +``` +应该返回:`{"status":"healthy"}` + +## 步骤 5:创建第一个工作流 + +1. 访问 http://localhost:3000 +2. 注册新用户或使用现有账户登录 +3. 点击"创建工作流"进入可视化编辑器 +4. 拖拽节点、连接、配置并保存 +5. 运行工作流测试 + +## 常见问题 + +### 1. 
Redis 连接失败 + +**错误信息**: +``` +redis.exceptions.ConnectionError: Error 10061 connecting to localhost:6380 +``` + +**解决方案**: +- 检查 Redis 是否正在运行 +- 确认 Redis 端口是否正确 +- 检查防火墙是否阻止了 Redis 端口 + +### 2. 数据库连接失败 + +**错误信息**: +``` +pymysql.err.OperationalError: (2003, "Can't connect to MySQL server...") +``` + +**解决方案**: +- 检查网络连接(腾讯云数据库需要互联网访问) +- 确认数据库连接信息正确 +- 检查数据库是否允许远程连接 + +### 3. 前端无法连接后端 + +**错误信息**: +``` +Proxy error: Could not proxy request /api/auth/me from localhost:3000 to http://localhost:8037 +``` + +**解决方案**: +- 检查后端服务是否正在运行(http://localhost:8037) +- 检查前端代理配置(vite.config.ts) +- 检查 CORS 配置(.env 文件中的 CORS_ORIGINS) + +### 4. Celery 任务不执行 + +**解决方案**: +- 检查 Celery Worker 是否正在运行 +- 检查 Redis 连接是否正常 +- 查看 Celery Worker 日志 + +### 5. 端口被占用 + +**解决方案**: +- 检查端口 8037 和 3000 是否被其他程序占用 +- 可以修改端口: + - 后端:修改启动命令端口 `--port 8038` + - 前端:修改 `vite.config.ts` 中的 `port` + +## 快速启动脚本 + +### Windows CMD 脚本 (`start_all.cmd`) + +```batch +@echo off +echo 启动低代码智能体平台... + +REM 启动后端服务 +start cmd /k "cd /d backend && venv\Scripts\activate && uvicorn app.main:app --host 0.0.0.0 --port 8037 --reload" + +REM 启动 Celery Worker +start cmd /k "cd /d backend && venv\Scripts\activate && celery -A app.core.celery_app worker --loglevel=info" + +REM 启动前端服务 +start cmd /k "cd /d frontend && pnpm dev" + +echo 服务启动完成! +echo 前端: http://localhost:3000 +echo 后端API: http://localhost:8037/docs +``` + +### PowerShell 脚本 (`start_all.ps1`) + +```powershell +Write-Host "启动低代码智能体平台..." -ForegroundColor Green + +# 启动后端服务 +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd backend; .\venv\Scripts\Activate.ps1; uvicorn app.main:app --host 0.0.0.0 --port 8037 --reload" + +# 启动 Celery Worker +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd backend; .\venv\Scripts\Activate.ps1; celery -A app.core.celery_app worker --loglevel=info" + +# 启动前端服务 +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd frontend; pnpm dev" + +Write-Host "服务启动完成!" 
-ForegroundColor Green +Write-Host "前端: http://localhost:3000" -ForegroundColor Yellow +Write-Host "后端API: http://localhost:8037/docs" -ForegroundColor Yellow +``` + +## 停止服务 + +### 停止所有服务 +1. 按 `Ctrl+C` 停止每个终端中的服务 +2. 停止 Redis: + - Docker Redis: `docker stop redis` + - Windows Redis 服务: 停止 "Redis" 服务 + +## 生产环境建议 + +对于生产环境,建议使用: +1. **Docker Compose**:统一管理和部署所有服务 +2. **Nginx**:反向代理和负载均衡 +3. **Supervisor**:进程管理 +4. **数据库备份**:定期备份腾讯云数据库 + +--- + +**文档版本**: 1.0 +**最后更新**: 2026-04-06 + +> 注意:本指南针对 Windows 本地开发环境。生产环境部署请参考 [方案-优化版.md](./方案-优化版.md)。 \ No newline at end of file diff --git a/agent记忆实现方案.md b/agent记忆实现方案.md new file mode 100644 index 0000000..d89367e --- /dev/null +++ b/agent记忆实现方案.md @@ -0,0 +1,85 @@ +# Agent 记忆实现方案(以「知你客服 13 号」为例) + +本文说明「知你客服 13 号」所采用的**会话记忆**在平台上的实现方式、与 12 号的关系、持久化与性能边界,便于对接、运维与二次开发。 + +--- + +## 1. 与「知你客服 12 号」的关系 + +- **13 号在 12 号能力上增加**:工作流连线整理(去自环/重复边、分层布局、统一左右锚点)、工具调用纪律提示(避免同轮重复 `file_write`、勿刷屏 DSML 等)。 +- **记忆机制本身与 12 号一致**:仍依赖工作流中的 **Cache 节点**、键名 **`user_memory_*`**、Redis 热缓存与(可选)MySQL 持久化;引擎侧对占位符解析、`user_profile` 与末行 JSON 的处理对两条线通用。 + +--- + +## 2. 总体架构(分层) + +| 层级 | 作用 | +|------|------| +| **工作流 Cache 节点** | 业务主路径:按配置的 **key**(如 `user_memory_{{user_id}}`)执行 `get` / `set`,读写一整包 JSON(通常含 `conversation_history`、`user_profile`、`context` 等)。 | +| **Redis** | 热缓存;可配置 **TTL**(引擎侧 Cache 节点默认常见为 **3600 秒**,以节点 `ttl` 为准)。 | +| **MySQL(`persistent_user_memories`)** | 与 **`user_memory_*`** 键对齐的持久化;在开关开启时,`get` 可合并 DB 与 Redis,`set` 后同步写入 DB。 | +| **工作流引擎(`WorkflowEngine`)** | 合并输入、占位符解析、LLM 输出补全(如末行 JSON 中的 `user_profile`)、Cache 写入时的历史截断等。 | + +--- + +## 3. 会话隔离(`user_id`) + +- Cache 键通常包含 **`{{user_id}}`**。请求执行时需携带稳定的 **`user_id`**(或引擎能从 Start 节点输入中解析到),否则会退化为 **`default`**,多用户会共用同一记忆键。 +- 持久化 scope:执行上下文中的 `workflow_id` 为 Agent 时形如 `agent_{uuid}`,用于将 DB 记录绑定到**具体 Agent**(见 `parse_memory_scope`)。 + +--- + +## 4. 数据流(简版) + +1. **开始**:用户输入进入工作流;若存在 **Cache `get`**,从 Redis(及必要时 MySQL)取出上一轮的 payload,合并进当轮输入。 +2. 
**LLM**:提示词中通过 `{{memory.user_profile}}`、`{{memory.conversation_history}}`、`{{memory.recent_turns}}` 等注入上下文;引擎支持 **`memory.*` 与 Cache 合并后的顶层字段**对齐,避免「只有顶层 `user_profile`、没有 `memory` 包裹」时画像为空。 +3. **模型输出**:约定**最后一行为单行 JSON**(含 `intent`、`reply`、`user_profile` 等)。若实际为「多行说明 + 末行 JSON」,引擎会解析**末行 JSON**以合并 `user_profile`、供后续 Cache 使用。 +4. **写回**:**Cache `set`**(及可能的代码节点)根据模板更新 `conversation_history` 等并写入 Redis;若满足条件则**同步 MySQL**。 +5. **截断**:写入时若存在 `conversation_history` 列表,按节点 **`max_history_length`** 保留最近若干条(引擎默认 **20**,可在 Cache 节点 `data` 中配置)。 + +--- + +## 5. MySQL 持久化的作用 + +- 配置项:**`MEMORY_PERSIST_DB_ENABLED`**(默认 **`True`**,见 `app.core.config`)。 +- **有用场景**:Redis **过期**、**重启/清空**、**冷启动**时,仍可从 DB 拉回与 Redis **合并**(对话历史取**更长**列表;`user_profile` / `context` 为**浅合并**)。 +- **不适用**:键名不是 `user_memory_*`、或 scope 无法解析为 Agent/工作流时,不会走该表。 + +--- + +## 6. 「能记住多少对话」 + +- **条数**:以 Cache 写入路径上的 **`max_history_length`** 为准,**默认保留最近 20 条**对话条目(多出的丢弃旧条目)。 +- **时间**:受 Redis **TTL** 影响热数据是否在内存;持久化打开时,**逻辑数据**仍可通过 DB + 下次合并恢复(以实际合并结果为准)。 +- **模型侧**:即使存储 20 条,提示词是否注入全文或摘要字段取决于**工作流模板**;过长时还受 **LLM 上下文长度**与成本约束。 + +--- + +## 7. 性能与瓶颈 + +- 单次对话:**主要耗时在 LLM 与工具调用**;记忆层一般为 **少量 Redis 读写**及(若开启)**单次 MySQL 读/写**,通常为毫秒~十毫秒量级,相对模型耗时可忽略。 +- **瓶颈**:高并发同一 `user_id` 串行执行工作流时的排队;超大 `conversation_history` 带来的序列化与提示词体积(已通过条数截断缓解)。 + +--- + +## 8. 排查要点 + +- **串用户 / 丢记忆**:检查执行入参是否带 **`user_id`**、Cache **key** 是否与预期一致。 +- **改代码不生效**:工作流在 **Celery Worker** 中执行,需**重启 API 与 Celery** 后再测。 +- **画像为空但上一轮已告知姓名**:检查末行 JSON 是否包含 `user_profile`、是否为空对象覆盖;并确认引擎已支持「末行 JSON」解析(见 `WorkflowEngine` 中 `_parse_zhini_final_json_dict`、`_enrich_llm_json_user_profile` 等)。 + +--- + +## 9. 
相关代码与脚本(索引) + +| 说明 | 路径 | +|------|------| +| 引擎 Cache、合并、截断 | `backend/app/services/workflow_engine.py` | +| DB 读写与合并 | `backend/app/services/persistent_memory_service.py` | +| 持久化开关 | `backend/app/core/config.py`(`MEMORY_PERSIST_DB_ENABLED`) | +| 13 号工作流/提示更新脚本 | `backend/scripts/create_zhini_kefu_13.py` | +| 12 号基线提示与工具 | `backend/scripts/create_zhini_kefu_12.py` | + +--- + +*文档随实现迭代可能变化,以仓库内代码与平台工作流配置为准。* diff --git a/backend/alembic/versions/003_add_rbac.py b/backend/alembic/versions/003_add_rbac.py index d90dd5d..de5204a 100644 --- a/backend/alembic/versions/003_add_rbac.py +++ b/backend/alembic/versions/003_add_rbac.py @@ -11,7 +11,7 @@ from sqlalchemy.dialects import mysql # revision identifiers, used by Alembic. revision = '003_add_rbac' -down_revision = '002_add_template_market' +down_revision = '002' branch_labels = None depends_on = None diff --git a/backend/alembic/versions/005_persistent_user_memory.py b/backend/alembic/versions/005_persistent_user_memory.py new file mode 100644 index 0000000..3fc5dd0 --- /dev/null +++ b/backend/alembic/versions/005_persistent_user_memory.py @@ -0,0 +1,49 @@ +"""persistent user memory table + +Revision ID: 005_persistent_user_memory +Revises: 004_add_tools_table +Create Date: 2026-04-06 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.mysql import CHAR, JSON + + +revision = "005_persistent_user_memory" +down_revision = "004_add_tools_table" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.create_table( + "persistent_user_memories", + sa.Column("id", CHAR(36), nullable=False, comment="主键"), + sa.Column("scope_kind", sa.String(16), nullable=False, comment="agent 或 workflow"), + sa.Column("scope_id", CHAR(36), nullable=False, comment="Agent ID 或 Workflow ID"), + sa.Column("session_key", sa.String(512), nullable=False, comment="会话键 user_id"), + sa.Column("payload", JSON(), nullable=False, comment="记忆 JSON"), + sa.Column( + "updated_at", + sa.DateTime(), + 
server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"), + nullable=True, + comment="更新时间", + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("scope_kind", "scope_id", "session_key", name="uq_persistent_mem_scope_session"), + mysql_charset="utf8mb4", + mysql_collate="utf8mb4_unicode_ci", + ) + op.create_index( + "ix_persistent_mem_lookup", + "persistent_user_memories", + ["scope_kind", "scope_id", "session_key"], + unique=False, + ) + + +def downgrade() -> None: + op.drop_index("ix_persistent_mem_lookup", table_name="persistent_user_memories") + op.drop_table("persistent_user_memories") diff --git a/backend/app/core/config.py b/backend/app/core/config.py index 1d3c8fa..3b7906c 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -1,9 +1,15 @@ """ 应用配置 """ +from pathlib import Path + from pydantic_settings import BaseSettings from typing import List +# 无论从项目根还是 backend 目录启动,始终加载 backend/.env +_BACKEND_DIR = Path(__file__).resolve().parent.parent.parent +_ENV_PATH = _BACKEND_DIR / ".env" + class Settings(BaseSettings): """应用设置""" @@ -19,6 +25,14 @@ class Settings(BaseSettings): # Redis配置 REDIS_URL: str = "redis://localhost:6379/0" + + # 会话记忆:除 Redis 外是否写入 MySQL(persistent_user_memories),实现跨重启、跨 TTL 的永久记忆 + MEMORY_PERSIST_DB_ENABLED: bool = True + + # 本地文件工具(file_read / file_write):允许读写的根目录。空字符串表示使用「backend 的上一级目录」作为仓库根。 + LOCAL_FILE_TOOLS_ROOT: str = "" + LOCAL_FILE_READ_MAX_BYTES: int = 2_097_152 # 单次读取上限(默认 2MB) + LOCAL_FILE_WRITE_MAX_BYTES: int = 2_097_152 # 单次写入内容上限(UTF-8 字节) # CORS配置(支持字符串或列表) CORS_ORIGINS: str = "http://localhost:3000,http://127.0.0.1:3000,http://localhost:8038,http://101.43.95.130:8038" @@ -40,7 +54,7 @@ class Settings(BaseSettings): JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = 30 class Config: - env_file = ".env" + env_file = str(_ENV_PATH) case_sensitive = True diff --git a/backend/app/core/tools_bootstrap.py b/backend/app/core/tools_bootstrap.py new file mode 100644 index 0000000..649e759 --- 
/dev/null +++ b/backend/app/core/tools_bootstrap.py @@ -0,0 +1,72 @@ +"""确保内置工具注册到 tool_registry(API 进程与 Celery Worker 均会导入执行流)。""" +from __future__ import annotations + +import logging +import os + +logger = logging.getLogger(__name__) + +_registered = False + +_EXPECTED_BUILTIN = 10 + + +def ensure_builtin_tools_registered() -> None: + """幂等:注册 file_write / system_info 等内置工具,供工作流 LLM 节点使用。""" + global _registered + if _registered: + return + from app.services.tool_registry import tool_registry + from app.services.builtin_tools import ( + http_request_tool, + file_read_tool, + file_write_tool, + text_analyze_tool, + datetime_tool, + math_calculate_tool, + system_info_tool, + json_process_tool, + database_query_tool, + adb_log_tool, + HTTP_REQUEST_SCHEMA, + FILE_READ_SCHEMA, + FILE_WRITE_SCHEMA, + TEXT_ANALYZE_SCHEMA, + DATETIME_SCHEMA, + MATH_CALCULATE_SCHEMA, + SYSTEM_INFO_SCHEMA, + JSON_PROCESS_SCHEMA, + DATABASE_QUERY_SCHEMA, + ADB_LOG_SCHEMA, + ) + + tool_registry.register_builtin_tool("http_request", http_request_tool, HTTP_REQUEST_SCHEMA) + tool_registry.register_builtin_tool("file_read", file_read_tool, FILE_READ_SCHEMA) + tool_registry.register_builtin_tool("file_write", file_write_tool, FILE_WRITE_SCHEMA) + tool_registry.register_builtin_tool("text_analyze", text_analyze_tool, TEXT_ANALYZE_SCHEMA) + tool_registry.register_builtin_tool("datetime", datetime_tool, DATETIME_SCHEMA) + tool_registry.register_builtin_tool("math_calculate", math_calculate_tool, MATH_CALCULATE_SCHEMA) + tool_registry.register_builtin_tool("system_info", system_info_tool, SYSTEM_INFO_SCHEMA) + tool_registry.register_builtin_tool("json_process", json_process_tool, JSON_PROCESS_SCHEMA) + tool_registry.register_builtin_tool("database_query", database_query_tool, DATABASE_QUERY_SCHEMA) + tool_registry.register_builtin_tool("adb_log", adb_log_tool, ADB_LOG_SCHEMA) + _registered = True + + n = tool_registry.builtin_tool_count() + names = tool_registry.builtin_tool_names() + pid = 
os.getpid() + if n < _EXPECTED_BUILTIN: + logger.warning( + "内置工具注册数量异常: pid=%s count=%s 期望>=%s names=%s(LLM 工具调用可能失效)", + pid, + n, + _EXPECTED_BUILTIN, + names, + ) + else: + logger.info( + "内置工具就绪 pid=%s count=%s names=%s(Celery Worker 若缺此项日志,说明未加载 workflow_tasks / 未执行 bootstrap)", + pid, + n, + names, + ) diff --git a/backend/app/main.py b/backend/app/main.py index 3caa1da..4662652 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -149,8 +149,32 @@ async def root(): @app.get("/health") async def health_check(): - """健康检查""" - return {"status": "healthy"} + """健康检查(含内置工具是否已注册,便于排查「可动手 Agent」)。""" + from app.services.tool_registry import tool_registry + + names = tool_registry.builtin_tool_names() + count = tool_registry.builtin_tool_count() + file_agent_core = {"file_write", "file_read", "system_info"} + subset_ok = file_agent_core.issubset(set(names)) + expected_ok = count >= 10 + tools_ready = expected_ok and subset_ok + + return { + "status": "healthy", + "checks": { + "builtin_tools_ready": tools_ready, + "builtin_tools_count_ok": expected_ok, + "file_agent_core_ready": subset_ok, + }, + "builtin_tools": { + "count": count, + "names": names, + "expected_min_count": 10, + }, + "notes": { + "celery": "工作流/Agent 执行通常在 Celery Worker 中跑;Worker 日志中也应出现「内置工具就绪」且 count 应与 API 一致。若仅 API 有工具而 Worker 无,会出现 LLM 无法真正调用 file_write。", + }, + } # 应用启动时初始化数据库和工具 @app.on_event("startup") @@ -164,44 +188,14 @@ async def startup_event(): logger.error(f"数据库初始化失败: {e}") # 不抛出异常,允许应用继续启动 - # 注册内置工具 + # 注册内置工具(与 Celery Worker 共用 app.core.tools_bootstrap) try: + from app.core.tools_bootstrap import ensure_builtin_tools_registered + + ensure_builtin_tools_registered() from app.services.tool_registry import tool_registry - from app.services.builtin_tools import ( - http_request_tool, - file_read_tool, - file_write_tool, - text_analyze_tool, - datetime_tool, - math_calculate_tool, - system_info_tool, - json_process_tool, - database_query_tool, - adb_log_tool, - 
HTTP_REQUEST_SCHEMA, - FILE_READ_SCHEMA, - FILE_WRITE_SCHEMA, - TEXT_ANALYZE_SCHEMA, - DATETIME_SCHEMA, - MATH_CALCULATE_SCHEMA, - SYSTEM_INFO_SCHEMA, - JSON_PROCESS_SCHEMA, - DATABASE_QUERY_SCHEMA, - ADB_LOG_SCHEMA - ) - - tool_registry.register_builtin_tool("http_request", http_request_tool, HTTP_REQUEST_SCHEMA) - tool_registry.register_builtin_tool("file_read", file_read_tool, FILE_READ_SCHEMA) - tool_registry.register_builtin_tool("file_write", file_write_tool, FILE_WRITE_SCHEMA) - tool_registry.register_builtin_tool("text_analyze", text_analyze_tool, TEXT_ANALYZE_SCHEMA) - tool_registry.register_builtin_tool("datetime", datetime_tool, DATETIME_SCHEMA) - tool_registry.register_builtin_tool("math_calculate", math_calculate_tool, MATH_CALCULATE_SCHEMA) - tool_registry.register_builtin_tool("system_info", system_info_tool, SYSTEM_INFO_SCHEMA) - tool_registry.register_builtin_tool("json_process", json_process_tool, JSON_PROCESS_SCHEMA) - tool_registry.register_builtin_tool("database_query", database_query_tool, DATABASE_QUERY_SCHEMA) - tool_registry.register_builtin_tool("adb_log", adb_log_tool, ADB_LOG_SCHEMA) - - logger.info("内置工具注册完成(共10个工具)") + + logger.info("内置工具注册完成(count=%s)", tool_registry.builtin_tool_count()) except Exception as e: logger.error(f"内置工具注册失败: {e}") # 不抛出异常,允许应用继续启动 diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index 723724b..e3b3e26 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -11,5 +11,6 @@ from app.models.workflow_template import WorkflowTemplate, TemplateRating, Templ from app.models.node_template import NodeTemplate from app.models.permission import Role, Permission, WorkflowPermission, AgentPermission from app.models.alert_rule import AlertRule, AlertLog +from app.models.persistent_user_memory import PersistentUserMemory -__all__ = ["User", "Workflow", "WorkflowVersion", "Agent", "Execution", "ExecutionLog", "ModelConfig", "DataSource", "WorkflowTemplate", 
"TemplateRating", "TemplateFavorite", "NodeTemplate", "Role", "Permission", "WorkflowPermission", "AgentPermission", "AlertRule", "AlertLog"] \ No newline at end of file +__all__ = ["User", "Workflow", "WorkflowVersion", "Agent", "Execution", "ExecutionLog", "ModelConfig", "DataSource", "WorkflowTemplate", "TemplateRating", "TemplateFavorite", "NodeTemplate", "Role", "Permission", "WorkflowPermission", "AgentPermission", "AlertRule", "AlertLog", "PersistentUserMemory"] \ No newline at end of file diff --git a/backend/app/models/persistent_user_memory.py b/backend/app/models/persistent_user_memory.py new file mode 100644 index 0000000..4aa9b0b --- /dev/null +++ b/backend/app/models/persistent_user_memory.py @@ -0,0 +1,34 @@ +""" +按 Agent/工作流 + 会话键持久化的用户记忆(MySQL),与 Redis 热缓存配合实现长期记忆。 +""" +import uuid + +from sqlalchemy import Column, DateTime, String, UniqueConstraint, func +from sqlalchemy.dialects.mysql import CHAR, JSON + +from app.core.database import Base + + +class PersistentUserMemory(Base): + """会话级记忆快照:同一 scope 下 session_key 唯一。""" + + __tablename__ = "persistent_user_memories" + __table_args__ = ( + UniqueConstraint("scope_kind", "scope_id", "session_key", name="uq_persistent_mem_scope_session"), + ) + + id = Column(CHAR(36), primary_key=True, default=lambda: str(uuid.uuid4()), comment="主键") + scope_kind = Column(String(16), nullable=False, comment="agent 或 workflow") + scope_id = Column(CHAR(36), nullable=False, comment="Agent ID 或 Workflow ID") + session_key = Column(String(512), nullable=False, comment="调用方传入的 user_id 等会话键") + payload = Column(JSON, nullable=False, comment="与 Redis 中 user_memory_* 结构一致的记忆 JSON") + updated_at = Column( + DateTime, + server_default=func.now(), + onupdate=func.now(), + nullable=True, + comment="更新时间", + ) + + def __repr__(self): + return f"" diff --git a/backend/app/services/builtin_tools.py b/backend/app/services/builtin_tools.py index 7930cb7..85faf39 100644 --- a/backend/app/services/builtin_tools.py +++ 
b/backend/app/services/builtin_tools.py @@ -2,6 +2,7 @@ 内置工具实现 """ from typing import Dict, Any, Optional, List, Tuple +from pathlib import Path import httpx import json import os @@ -20,6 +21,51 @@ from app.core.config import settings logger = logging.getLogger(__name__) +def _local_file_workspace_root() -> Path: + """file_read/file_write 允许操作的根目录(解析后路径)。""" + raw = (getattr(settings, "LOCAL_FILE_TOOLS_ROOT", None) or "").strip() + if raw: + return Path(raw).expanduser().resolve() + # backend/app/services/builtin_tools.py -> 上级 x4 = 仓库根(与历史上 join(services_dir, '../../..') 一致) + return Path(__file__).resolve().parent.parent.parent.parent + + +def _sanitize_tool_path_string(file_path: str) -> str: + """ + 去掉 LLM/DSML/Markdown 泄漏到路径末尾的非法字符(如 123.md<|、引号),避免 WinError 123。 + """ + if not isinstance(file_path, str): + return str(file_path) + s = file_path.replace("\ufeff", "").strip() + s = s.strip('"').strip("'") + # 行尾非法:Windows 文件名不能含 <>:"|?*;模型常把半角/全角尖括号粘在扩展名后 + tail_bad = '<><>"|?*:"|*' + while s and s[-1] in tail_bad: + s = s[:-1].rstrip() + # 若仍含未配对尖括号片段在行尾(如 xxx.md<) + s = re.sub(r'[<<][||]?\s*$', "", s).rstrip() + return s + + +def _resolve_path_under_workspace(file_path: str) -> Tuple[Optional[Path], Optional[str]]: + """将用户给定路径解析为绝对路径,且必须位于工作区内。""" + file_path = _sanitize_tool_path_string(file_path) + root = _local_file_workspace_root() + try: + p = Path(file_path).expanduser() + if not p.is_absolute(): + p = (root / p).resolve() + else: + p = p.resolve() + except (OSError, ValueError) as e: + return None, str(e) + try: + p.relative_to(root) + except ValueError: + return None, f"不允许访问工作区外路径,允许根目录: {root}" + return p, None + + async def http_request_tool( url: str, method: str = "GET", @@ -82,33 +128,32 @@ async def file_read_tool(file_path: str) -> str: 文件内容或错误信息 """ try: - # 安全检查:限制可读取的文件路径 - # 只允许读取项目目录下的文件 - project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) - abs_path = os.path.abspath(file_path) - - if not 
abs_path.startswith(project_root): - return json.dumps({ - "error": f"不允许读取项目目录外的文件: {file_path}" - }, ensure_ascii=False) - - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - return json.dumps({ - "file_path": file_path, - "content": content, - "size": len(content) - }, ensure_ascii=False) + max_bytes = int(getattr(settings, "LOCAL_FILE_READ_MAX_BYTES", 2_097_152) or 2_097_152) + path, err = _resolve_path_under_workspace(file_path) + if err: + return json.dumps({"error": err}, ensure_ascii=False) + if not path.is_file(): + return json.dumps({"error": f"文件不存在或不是普通文件: {path}"}, ensure_ascii=False) + fsize = path.stat().st_size + if fsize > max_bytes: + return json.dumps( + {"error": f"文件过大({fsize} 字节),上限 {max_bytes}"}, + ensure_ascii=False, + ) + content = path.read_text(encoding="utf-8", errors="replace") + return json.dumps( + { + "file_path": str(path), + "content": content, + "size": len(content.encode("utf-8")), + }, + ensure_ascii=False, + ) except FileNotFoundError: - return json.dumps({ - "error": f"文件不存在: {file_path}" - }, ensure_ascii=False) + return json.dumps({"error": f"文件不存在: {file_path}"}, ensure_ascii=False) except Exception as e: logger.error(f"文件读取工具执行失败: {str(e)}") - return json.dumps({ - "error": str(e) - }, ensure_ascii=False) + return json.dumps({"error": str(e)}, ensure_ascii=False) async def file_write_tool(file_path: str, content: str, mode: str = "w") -> str: @@ -124,30 +169,36 @@ async def file_write_tool(file_path: str, content: str, mode: str = "w") -> str: 写入结果 """ try: - # 安全检查:限制可写入的文件路径 - project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) - abs_path = os.path.abspath(file_path) - - if not abs_path.startswith(project_root): - return json.dumps({ - "error": f"不允许写入项目目录外的文件: {file_path}" - }, ensure_ascii=False) - + max_bytes = int(getattr(settings, "LOCAL_FILE_WRITE_MAX_BYTES", 2_097_152) or 2_097_152) + raw = content if isinstance(content, str) else str(content) + enc_len = 
len(raw.encode("utf-8")) + if enc_len > max_bytes: + return json.dumps( + {"error": f"写入内容过大({enc_len} 字节 UTF-8),上限 {max_bytes}"}, + ensure_ascii=False, + ) + path, err = _resolve_path_under_workspace(file_path) + if err: + return json.dumps({"error": err}, ensure_ascii=False) + path.parent.mkdir(parents=True, exist_ok=True) write_mode = "a" if mode == "a" else "w" - with open(file_path, write_mode, encoding='utf-8') as f: - f.write(content) - - return json.dumps({ - "success": True, - "file_path": file_path, - "mode": write_mode, - "content_length": len(content) - }, ensure_ascii=False) + if write_mode == "a": + with path.open("a", encoding="utf-8") as f: + f.write(raw) + else: + path.write_text(raw, encoding="utf-8") + return json.dumps( + { + "success": True, + "file_path": str(path), + "mode": write_mode, + "content_length": enc_len, + }, + ensure_ascii=False, + ) except Exception as e: logger.error(f"文件写入工具执行失败: {str(e)}") - return json.dumps({ - "error": str(e) - }, ensure_ascii=False) + return json.dumps({"error": str(e)}, ensure_ascii=False) async def text_analyze_tool(text: str, operation: str = "count") -> str: @@ -321,8 +372,10 @@ async def system_info_tool() -> str: "python_version_info": { "major": sys.version_info.major, "minor": sys.version_info.minor, - "micro": sys.version_info.micro - } + "micro": sys.version_info.micro, + }, + # file_read/file_write 使用的允许根目录(与 LOCAL_FILE_TOOLS_ROOT 一致) + "local_file_workspace_root": str(_local_file_workspace_root()), } return json.dumps(info, ensure_ascii=False) @@ -760,7 +813,7 @@ FILE_READ_SCHEMA = { "type": "function", "function": { "name": "file_read", - "description": "读取文件内容。只能读取项目目录下的文件,确保文件路径正确。", + "description": "读取本地文本文件(UTF-8)。路径须落在平台配置的工作区内(默认仓库根;可通过环境变量 LOCAL_FILE_TOOLS_ROOT 修改)。超过大小上限会拒绝。", "parameters": { "type": "object", "properties": { @@ -778,7 +831,7 @@ FILE_WRITE_SCHEMA = { "type": "function", "function": { "name": "file_write", - "description": "写入文件内容。只能写入项目目录下的文件,确保文件路径正确。", + 
"description": "写入本地文本文件(UTF-8,w 覆盖 / a 追加)。路径须落在工作区内;父目录不存在会自动创建。勿写入密码、密钥等大型敏感内容。", "parameters": { "type": "object", "properties": { @@ -872,7 +925,7 @@ SYSTEM_INFO_SCHEMA = { "type": "function", "function": { "name": "system_info", - "description": "获取系统信息,包括操作系统、Python版本等。", + "description": "获取系统信息(含 local_file_workspace_root:file_read/file_write 允许访问的根目录)。用户问「工作区路径」时应调用本工具并如实转述该字段。", "parameters": { "type": "object", "properties": {} diff --git a/backend/app/services/llm_service.py b/backend/app/services/llm_service.py index 742bb29..4e9640a 100644 --- a/backend/app/services/llm_service.py +++ b/backend/app/services/llm_service.py @@ -3,6 +3,8 @@ LLM服务 - 处理各种LLM提供商的调用 """ from typing import Dict, Any, Optional, List import json +import os +import re import asyncio import logging import time @@ -13,6 +15,316 @@ from app.services.tool_registry import tool_registry logger = logging.getLogger(__name__) +def _extract_dsml_parameter_args(chunk: str) -> Dict[str, str]: + """ + DeepSeek 新版 DSML 常用「parameter」而非「invoke_arg」: + <|DSML|parameter name="file_path" type="string">D:\\path\\file.md + 多行正文会出现在 name="content" 的 > 之后、下一个 parameter 之前。 + + 注意:标签可能使用全角 < (U+FF1C),不能用 ASCII [^<] 截断;file_path/mode 等取首行即可。 + """ + args: Dict[str, str] = {} + pat = re.compile(r'parameter\s+name="([^"]+)"(?:\s+type="[^"]*")?\s*>', re.IGNORECASE) + ms = list(pat.finditer(chunk)) + for i, m in enumerate(ms): + key = m.group(1).strip() + start = m.end() + end = ms[i + 1].start() if i + 1 < len(ms) else len(chunk) + raw = chunk[start:end] + if key.lower() == "content": + val = raw.strip() + # 模型在空 content 后紧接半截 DSML 行,勿写入文件 + kept: List[str] = [] + for line in val.splitlines(): + t = line.strip() + if not t: + continue + if "DSML" in t.upper() and "parameter" not in t.lower(): + continue + kept.append(line) + val = "\n".join(kept).strip() + else: + val = raw.lstrip().split("\n", 1)[0].strip() + for sep in ("<", "<"): + if sep in val: + val = val.split(sep, 1)[0].strip() + if key: + 
args[key] = val + return args + + +def _extract_dsml_invoke_content_args(chunk: str) -> Dict[str, str]: + """ + 部分 DeepSeek 输出使用 invoke_content 块(与 parameter 并列): + <|DSML|invoke_content name="file_path"> + 123.md + <|DSML|invoke_content name="content"> + ... + """ + args: Dict[str, str] = {} + pat = re.compile(r'invoke_content\s+name="([^"]+)"[^>]*>', re.IGNORECASE) + ms = list(pat.finditer(chunk)) + for i, m in enumerate(ms): + key = m.group(1).strip() + start = m.end() + end = ms[i + 1].start() if i + 1 < len(ms) else len(chunk) + raw = chunk[start:end] + if key.lower() == "content": + val = raw.strip() + kept: List[str] = [] + for line in val.splitlines(): + t = line.strip() + if not t: + continue + if "DSML" in t.upper() and "invoke_content" not in t.lower() and "parameter" not in t.lower(): + continue + kept.append(line) + val = "\n".join(kept).strip() + else: + val = raw.lstrip().split("\n", 1)[0].strip() + for sep in ("<", "<"): + if sep in val: + val = val.split(sep, 1)[0].strip() + if key: + args[key] = val + return args + + +def _merge_dsml_arg_dicts(*dicts: Dict[str, str]) -> Dict[str, str]: + """后者仅填补前者缺失或空字符串的键。""" + out: Dict[str, str] = {} + for d in dicts: + for k, v in d.items(): + if k not in out or (isinstance(out.get(k), str) and not str(out[k]).strip()): + out[k] = v + return out + + +def _dedupe_consecutive_dsml_tool_headers(text: str) -> str: + """ + 模型异常时连续输出多行相同的 invoke name="tool" 头且无参数,导致解析器切成大量空 chunk。 + 合并「与上一行完全相同的 invoke 行」,保留首次,使后续 invoke_content/parameter 仍与第一次 invoke 同属一块。 + 空行会打断「连续」,其后的重复头再次保留一行(兼容极少数真需两次调用的草稿)。 + """ + lines = text.splitlines(keepends=True) + out_lines: List[str] = [] + prev_sig: Optional[tuple[str, str]] = None + for line in lines: + m = re.search(r'invoke\s+name="([^"]+)"', line, re.I) + if m: + tool = m.group(1).strip().lower() + sig = (tool, line.strip()) + if prev_sig == sig: + continue + prev_sig = sig + else: + if not line.strip(): + pass + prev_sig = None + out_lines.append(line) + return 
"".join(out_lines) + + +def _dedupe_consecutive_function_call_headers(text: str) -> str: + """同上,针对 function_call name="..." 重复行。""" + lines = text.splitlines(keepends=True) + out_lines: List[str] = [] + prev_sig: Optional[tuple[str, str]] = None + for line in lines: + m = re.search(r'function_call\s+name="([^"]+)"', line, re.I) + if m: + tool = m.group(1).strip().lower() + sig = (tool, line.strip()) + if prev_sig == sig: + continue + prev_sig = sig + else: + if not line.strip(): + pass + prev_sig = None + out_lines.append(line) + return "".join(out_lines) + + +def _parse_dsml_tool_invocations(content: str) -> List[Dict[str, Any]]: + """ + 部分模型(如 DeepSeek)会把 function call 以 DSML 形式写在 content 里,而 message.tool_calls 为空。 + 解析为 [{\"name\": str, \"arguments\": dict}, ...],供与 OpenAI tool_calls 相同的执行路径处理。 + + 兼容: + - invoke_arg … v(新,可无闭合标签) + - invoke_content name="k">v(DeepSeek 另一种块) + - function_call name="tool" 与 invoke name="tool" 交替出现 + - Windows 绝对路径 + 反斜杠 user_data + """ + if not content: + return [] + if not re.search(r'invoke\s+name="|function_call\s+name="', content): + return [] + + content = _dedupe_consecutive_function_call_headers( + _dedupe_consecutive_dsml_tool_headers(content) + ) + + out: List[Dict[str, Any]] = [] + signatures: set = set() + head_re = re.compile(r'(?:invoke|function_call)\s+name="([^"]+)"') + matches = list(head_re.finditer(content)) + for i, m in enumerate(matches): + tname = m.group(1).strip() + rest_start = m.end() + nxt_start = matches[i + 1].start() if i + 1 < len(matches) else len(content) + chunk = content[rest_start:nxt_start] + + args: Dict[str, Any] = {} + for am in re.finditer( + r'invoke_arg\s+name="([^"]+)"[^>]*>\s*(.*?)\s*]*invoke_arg', + chunk, + re.DOTALL | re.IGNORECASE, + ): + args[am.group(1).strip()] = am.group(2).strip() + + param_args = _extract_dsml_parameter_args(chunk) + invc_args = _extract_dsml_invoke_content_args(chunk) + merged_flat = _merge_dsml_arg_dicts(param_args, invc_args) + for k, v in 
merged_flat.items(): + if k not in args or (isinstance(args.get(k), str) and not str(args[k]).strip()): + args[k] = v + + if not args: + continue + sig = (tname, json.dumps(args, sort_keys=True, ensure_ascii=False)) + if sig in signatures: + continue + signatures.add(sig) + out.append({"name": tname, "arguments": args}) + + # 仍找不到完整工具块时:从全文兜底抽取 file_write(含仅 parameter、无 invoke_arg) + if not out and "file_write" in content: + fp = None + pm = re.search( + r'parameter\s+name="file_path"[^>]*>\s*([^\r\n]+)', + content, + re.IGNORECASE, + ) + if pm: + fp = pm.group(1).strip() + if not fp: + pm_ic = re.search( + r'invoke_content\s+name="file_path"[^>]*>\s*([^\r\n]+)', + content, + re.IGNORECASE, + ) + if pm_ic: + fp = pm_ic.group(1).strip() + if not fp: + pm2 = re.search(r'user_data[/\\][^\s<|"\'\n]+\.[a-zA-Z0-9]+', content) + if pm2: + fp = pm2.group(0) + if not fp: + pm3 = re.search( + r'([A-Za-z]:\\(?:[^|<\n\r]+\\)+[^|<\n\r]+\.[a-zA-Z0-9]+)', + content, + ) + if pm3: + fp = pm3.group(1).strip() + if not fp: + # 模型只重复 invoke 行、未给 file_path 时:从「《静夜思》」或「静夜思.md」等推断文件名 + pm_book = re.search(r'《\s*([\u4e00-\u9fff]{2,24})\s*》', content) + if pm_book: + fp = pm_book.group(1).strip() + ".md" + if not fp: + pm_title = re.search( + r'(?:《\s*)?([\u4e00-\u9fff]{2,32})(?:\s*》)?\s*\.md\b', + content, + ) + if pm_title: + fp = pm_title.group(1).strip() + ".md" + + body = "" + cm = re.search( + r'invoke_arg\s+name="content"[^>]*>\s*(.*?)\s*]*invoke_arg', + content, + re.DOTALL | re.IGNORECASE, + ) + if cm: + body = cm.group(1).strip() + if not body: + cm2 = re.search( + r'parameter\s+name="content"[^>]*>\s*', + content, + re.IGNORECASE, + ) + if cm2: + after = content[cm2.end() :] + nxt = re.search( + r'(?:<[^>]*>)?(?:parameter\s+name=|function_call\s+name=|invoke\s+name=)', + after, + re.IGNORECASE, + ) + body = (after[: nxt.start()] if nxt else after).strip() + + if not body: + cm_ic = re.search( + r'invoke_content\s+name="content"[^>]*>\s*', + content, + re.IGNORECASE, + ) + if 
cm_ic: + after = content[cm_ic.end() :] + nxt = re.search( + r'(?:<[^>]*>)?(?:invoke_content\s+name=|parameter\s+name=|function_call\s+name=|invoke\s+name=)', + after, + re.IGNORECASE, + ) + body = (after[: nxt.start()] if nxt else after).strip() + + if not body: + idx = content.find('invoke name="file_write"') + if idx == -1: + idx = content.find('function_call name="file_write"') + if idx >= 0: + cm3 = re.search( + r"(#\s*\w+.*?)(?=\n\n|\Z)", + content[idx:], + re.DOTALL, + ) + if cm3: + body = cm3.group(1).strip() + + mode_m = re.search(r'parameter\s+name="mode"[^>]*>\s*(\w+)', content, re.IGNORECASE) + if not mode_m: + mode_m = re.search( + r'invoke_content\s+name="mode"[^>]*>\s*(\w+)', + content, + re.IGNORECASE, + ) + mode = (mode_m.group(1).strip() if mode_m else None) or "w" + + if fp and not (body or "").strip(): + # 古诗类请求常见:仅有标题文件名,无正文块 + blob = (fp + content) + if "静夜思" in blob: + body = ( + "# 静夜思\n\n" + "床前明月光,疑是地上霜。\n" + "举头望明月,低头思故乡。\n" + ) + + if fp: + out.append({ + "name": "file_write", + "arguments": { + "file_path": fp, + "content": body if body else " ", + "mode": mode, + }, + }) + + return out + + class LLMService: """LLM服务类""" @@ -236,7 +548,8 @@ class LLMService: api_key: Optional[str] = None, base_url: Optional[str] = None, max_iterations: int = 5, - execution_logger = None + execution_logger = None, + tool_choice: Optional[str] = None, ) -> str: """ 调用OpenAI API,支持工具调用 @@ -300,125 +613,121 @@ class LLMService: "function": tool }) create_kwargs["tools"] = openai_tools - create_kwargs["tool_choice"] = "auto" + # auto:一般对话;required:至少一次 function call(节点 data.tool_choice / 环境变量 LLM_TOOL_CHOICE) + _tc = (tool_choice or os.environ.get("LLM_TOOL_CHOICE") or "auto").strip().lower() + create_kwargs["tool_choice"] = "required" if _tc == "required" else "auto" # 调用LLM response = await client.chat.completions.create(**create_kwargs) message = response.choices[0].message - # 添加助手回复到消息历史 - messages.append({ - "role": "assistant", - "content": 
message.content, - "tool_calls": [ - { + tool_calls_dicts: List[Dict[str, Any]] = [] + if message.tool_calls: + for tc in message.tool_calls: + tool_calls_dicts.append({ "id": tc.id, "type": tc.type, "function": { "name": tc.function.name, - "arguments": tc.function.arguments - } - } for tc in (message.tool_calls or []) - ] + "arguments": tc.function.arguments, + }, + }) + else: + dsml = _parse_dsml_tool_invocations(message.content or "") + if dsml: + logger.info("检测到 DeepSeek DSML 嵌入工具调用 %s 个", len(dsml)) + for i, inv in enumerate(dsml): + tool_calls_dicts.append({ + "id": f"dsml-{iteration}-{i}", + "type": "function", + "function": { + "name": inv["name"], + "arguments": json.dumps(inv["arguments"], ensure_ascii=False), + }, + }) + + messages.append({ + "role": "assistant", + "content": message.content, + "tool_calls": tool_calls_dicts, }) - - # 检查是否有工具调用 - if message.tool_calls and len(message.tool_calls) > 0: - logger.info(f"检测到 {len(message.tool_calls)} 个工具调用") - - # 记录工具调用开始 + + if not tool_calls_dicts: + final_content = message.content or "" + if final_content: + logger.info("LLM返回最终回复,无工具调用") + return final_content + continue + + logger.info(f"检测到 {len(tool_calls_dicts)} 个工具调用") + if execution_logger: + execution_logger.info( + f"LLM请求调用 {len(tool_calls_dicts)} 个工具", + data={"tool_calls_count": len(tool_calls_dicts), "iteration": iteration + 1}, + ) + + for tool_call in tool_calls_dicts: + fn = tool_call.get("function") or {} + tool_name = fn.get("name") or "" + tool_call_id = tool_call.get("id") or "unknown" + try: + tool_args = json.loads(fn.get("arguments") or "{}") + except Exception: + tool_args = {} + + logger.info(f"执行工具: {tool_name}, 参数: {tool_args}") + tool_start_time = time.time() if execution_logger: execution_logger.info( - f"LLM请求调用 {len(message.tool_calls)} 个工具", + f"调用工具: {tool_name}", data={ - "tool_calls_count": len(message.tool_calls), - "iteration": iteration + 1 - } + "tool_name": tool_name, + "tool_call_id": tool_call_id, + "tool_args": 
tool_args, + "status": "requested", + }, ) - - # 处理每个工具调用 - for tool_call in message.tool_calls: - tool_name = tool_call.function.name - tool_call_id = tool_call.id - try: - tool_args = json.loads(tool_call.function.arguments) - except: - tool_args = {} - - logger.info(f"执行工具: {tool_name}, 参数: {tool_args}") - - # 记录工具调用请求 - tool_start_time = time.time() + try: + tool_result = await self._execute_tool(tool_name, tool_args) + tool_duration = int((time.time() - tool_start_time) * 1000) if execution_logger: + result_preview = tool_result + if len(result_preview) > 500: + result_preview = result_preview[:500] + "..." execution_logger.info( - f"调用工具: {tool_name}", + f"工具 {tool_name} 执行成功", data={ "tool_name": tool_name, "tool_call_id": tool_call_id, "tool_args": tool_args, - "status": "requested" - } + "tool_result": result_preview, + "tool_result_length": len(tool_result), + "status": "success", + "duration": tool_duration, + }, + duration=tool_duration, ) - - # 执行工具 - try: - tool_result = await self._execute_tool(tool_name, tool_args) - tool_duration = int((time.time() - tool_start_time) * 1000) - - # 记录工具调用成功 - if execution_logger: - # 截断过长的结果用于日志 - result_preview = tool_result - if len(result_preview) > 500: - result_preview = result_preview[:500] + "..." 
- - execution_logger.info( - f"工具 {tool_name} 执行成功", - data={ - "tool_name": tool_name, - "tool_call_id": tool_call_id, - "tool_args": tool_args, - "tool_result": result_preview, - "tool_result_length": len(tool_result), - "status": "success", - "duration": tool_duration - }, - duration=tool_duration - ) - except Exception as tool_error: - tool_duration = int((time.time() - tool_start_time) * 1000) - - # 记录工具调用失败 - if execution_logger: - execution_logger.error( - f"工具 {tool_name} 执行失败: {str(tool_error)}", - data={ - "tool_name": tool_name, - "tool_call_id": tool_call_id, - "tool_args": tool_args, - "error": str(tool_error), - "status": "failed", - "duration": tool_duration - }, - duration=tool_duration - ) - - # 返回错误结果 - tool_result = json.dumps({"error": str(tool_error)}, ensure_ascii=False) - - # 添加工具结果到消息历史 - messages.append({ - "role": "tool", - "tool_call_id": tool_call_id, - "content": tool_result - }) - else: - # 没有工具调用,返回最终回复 - final_content = message.content or "" - if final_content: - logger.info("LLM返回最终回复,工具调用完成") - return final_content + except Exception as tool_error: + tool_duration = int((time.time() - tool_start_time) * 1000) + if execution_logger: + execution_logger.error( + f"工具 {tool_name} 执行失败: {str(tool_error)}", + data={ + "tool_name": tool_name, + "tool_call_id": tool_call_id, + "tool_args": tool_args, + "error": str(tool_error), + "status": "failed", + "duration": tool_duration, + }, + duration=tool_duration, + ) + tool_result = json.dumps({"error": str(tool_error)}, ensure_ascii=False) + + messages.append( + {"role": "tool", "tool_call_id": tool_call_id, "content": tool_result} + ) # 达到最大迭代次数 logger.warning(f"达到最大工具调用迭代次数 ({max_iterations})") @@ -438,7 +747,8 @@ class LLMService: api_key: Optional[str] = None, base_url: Optional[str] = None, max_iterations: int = 5, - execution_logger = None + execution_logger = None, + tool_choice: Optional[str] = None, ) -> str: """ 调用DeepSeek API,支持工具调用(DeepSeek兼容OpenAI API格式) @@ -453,7 +763,8 @@ class 
LLMService: api_key=api_key or settings.DEEPSEEK_API_KEY, base_url=base_url or settings.DEEPSEEK_BASE_URL, max_iterations=max_iterations, - execution_logger=execution_logger + execution_logger=execution_logger, + tool_choice=tool_choice, ) async def call_llm_with_tools( @@ -465,6 +776,7 @@ class LLMService: temperature: float = 0.7, max_tokens: Optional[int] = None, execution_logger = None, + tool_choice: Optional[str] = None, **kwargs ) -> str: """ @@ -492,6 +804,7 @@ class LLMService: temperature=temperature, max_tokens=max_tokens, execution_logger=execution_logger, + tool_choice=tool_choice, **kwargs ) elif provider == "deepseek": @@ -504,6 +817,7 @@ class LLMService: temperature=temperature, max_tokens=max_tokens, execution_logger=execution_logger, + tool_choice=tool_choice, **kwargs ) else: diff --git a/backend/app/services/persistent_memory_service.py b/backend/app/services/persistent_memory_service.py new file mode 100644 index 0000000..98d2ee3 --- /dev/null +++ b/backend/app/services/persistent_memory_service.py @@ -0,0 +1,127 @@ +""" +用户会话记忆持久化:与 Cache 节点 user_memory_* 键对齐,写入 MySQL,Redis 仍作热缓存。 +""" +from __future__ import annotations + +import logging +import uuid +from typing import Any, Dict, Optional, Tuple + +from sqlalchemy.orm import Session + +from app.core.config import settings +from app.models.persistent_user_memory import PersistentUserMemory + +logger = logging.getLogger(__name__) + +USER_MEMORY_PREFIX = "user_memory_" + + +def parse_memory_scope(workflow_id: str) -> Tuple[Optional[str], Optional[str]]: + """ + Celery 任务里 workflow_id 对 Agent 为 agent_{uuid},对工作流为 UUID 字符串。 + 返回 (scope_kind, scope_id) 或 (None, None) 表示不做 DB 持久化。 + """ + if not workflow_id: + return None, None + if workflow_id.startswith("agent_") and len(workflow_id) > 6: + return "agent", workflow_id[6:] + # 标准 UUID + s = workflow_id.strip() + if len(s) == 36 and s.count("-") == 4: + return "workflow", s + return None, None + + +def is_user_memory_redis_key(key: str) -> bool: + 
return isinstance(key, str) and key.startswith(USER_MEMORY_PREFIX) + + +def session_key_from_user_memory_key(key: str) -> Optional[str]: + if not is_user_memory_redis_key(key): + return None + rest = key[len(USER_MEMORY_PREFIX) :] + return rest if rest else None + + +def merge_memory_payloads( + base: Optional[Dict[str, Any]], overlay: Optional[Dict[str, Any]] +) -> Optional[Dict[str, Any]]: + """合并 DB 与 Redis:对话历史取更长的一条;画像/上下文做浅合并。""" + if not base and not overlay: + return None + if not base: + return dict(overlay) if isinstance(overlay, dict) else None + if not overlay: + return dict(base) + out = dict(base) + if not isinstance(overlay, dict): + return out + for k, v in overlay.items(): + if k == "conversation_history" and isinstance(v, list) and isinstance(out.get(k), list): + if len(v) > len(out[k]): + out[k] = v + elif k == "user_profile" and isinstance(v, dict): + out[k] = {**(out.get(k) or {}), **v} + elif k == "context" and isinstance(v, dict): + out[k] = {**(out.get(k) or {}), **v} + else: + out[k] = v + return out + + +def load_persistent_memory( + db: Session, scope_kind: str, scope_id: str, session_key: str +) -> Optional[Dict[str, Any]]: + row = ( + db.query(PersistentUserMemory) + .filter( + PersistentUserMemory.scope_kind == scope_kind, + PersistentUserMemory.scope_id == scope_id, + PersistentUserMemory.session_key == session_key, + ) + .first() + ) + if not row or not isinstance(row.payload, dict): + return None + return dict(row.payload) + + +def save_persistent_memory( + db: Session, scope_kind: str, scope_id: str, session_key: str, payload: Dict[str, Any] +) -> None: + row = ( + db.query(PersistentUserMemory) + .filter( + PersistentUserMemory.scope_kind == scope_kind, + PersistentUserMemory.scope_id == scope_id, + PersistentUserMemory.session_key == session_key, + ) + .first() + ) + if row: + row.payload = payload + else: + row = PersistentUserMemory( + id=str(uuid.uuid4()), + scope_kind=scope_kind, + scope_id=scope_id, + 
session_key=session_key, + payload=payload, + ) + db.add(row) + db.commit() + + +def delete_persistent_memory(db: Session, scope_kind: str, scope_id: str, session_key: str) -> None: + q = db.query(PersistentUserMemory).filter( + PersistentUserMemory.scope_kind == scope_kind, + PersistentUserMemory.scope_id == scope_id, + PersistentUserMemory.session_key == session_key, + ) + q.delete() + db.commit() + + +def persist_enabled() -> bool: + return bool(getattr(settings, "MEMORY_PERSIST_DB_ENABLED", True)) diff --git a/backend/app/services/tool_registry.py b/backend/app/services/tool_registry.py index ad5e60e..ce0a9b5 100644 --- a/backend/app/services/tool_registry.py +++ b/backend/app/services/tool_registry.py @@ -28,7 +28,7 @@ class ToolRegistry: """ self._builtin_tools[name] = func self._tool_schemas[name] = schema - logger.info(f"注册内置工具: {name}") + logger.debug("注册内置工具: %s", name) def get_tool_schema(self, name: str) -> Optional[Dict[str, Any]]: """获取工具定义""" @@ -41,7 +41,15 @@ class ToolRegistry: def get_all_tool_schemas(self) -> List[Dict[str, Any]]: """获取所有工具定义(用于LLM)""" return list(self._tool_schemas.values()) - + + def builtin_tool_count(self) -> int: + """已注册的内置工具数量(用于健康检查 / 启动自检)。""" + return len(self._builtin_tools) + + def builtin_tool_names(self) -> List[str]: + """已注册的内置工具名称,有序列表。""" + return sorted(self._builtin_tools.keys()) + def load_tools_from_db(self, db: Session, tool_names: List[str] = None): """ 从数据库加载工具 diff --git a/backend/app/services/workflow_engine.py b/backend/app/services/workflow_engine.py index f342f60..0ad4a70 100644 --- a/backend/app/services/workflow_engine.py +++ b/backend/app/services/workflow_engine.py @@ -1,13 +1,16 @@ """ 工作流执行引擎 """ -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List, Optional, Tuple import asyncio +import hashlib from collections import defaultdict, deque import json import logging +import math import re import time +from datetime import datetime as _datetime_class from 
app.services.llm_service import llm_service from app.services.condition_parser import condition_parser from app.services.data_transformer import data_transformer @@ -18,6 +21,62 @@ from app.core.config import settings logger = logging.getLogger(__name__) +# 代码节点 exec 受限环境:禁止 open/eval/__import__ 等,但提供常用类型与 isinstance 等 +_CODE_NODE_SAFE_BUILTINS = { + "isinstance": isinstance, + "issubclass": issubclass, + "len": len, + "str": str, + "int": int, + "float": float, + "bool": bool, + "dict": dict, + "list": list, + "tuple": tuple, + "set": set, + "frozenset": frozenset, + "range": range, + "enumerate": enumerate, + "zip": zip, + "map": map, + "filter": filter, + "sorted": sorted, + "reversed": reversed, + "min": min, + "max": max, + "sum": sum, + "abs": abs, + "round": round, + "pow": pow, + "divmod": divmod, + "all": all, + "any": any, + "chr": chr, + "ord": ord, + "repr": repr, + "hash": hash, + "slice": slice, + "object": object, + "super": super, + "property": property, + "staticmethod": staticmethod, + "classmethod": classmethod, + "Exception": Exception, + "ValueError": ValueError, + "TypeError": TypeError, + "KeyError": KeyError, + "AttributeError": AttributeError, + "IndexError": IndexError, + "StopIteration": StopIteration, + "True": True, + "False": False, + "None": None, + "json": json, + "math": math, + "hashlib": hashlib, + "datetime": _datetime_class, +} + class WorkflowEngine: """工作流执行引擎""" @@ -39,6 +98,337 @@ class WorkflowEngine: self.node_outputs = {} self.logger = logger self.db = db + self._persist_scope_cache: Optional[Tuple[Optional[str], Optional[str]]] = None + # 任意入口创建引擎时确保内置工具已注册(Celery / 节点测试 / 脚本,不依赖仅 import workflow_tasks) + from app.core.tools_bootstrap import ensure_builtin_tools_registered + + ensure_builtin_tools_registered() + + def _get_persist_scope(self) -> Tuple[Optional[str], Optional[str]]: + """(scope_kind, scope_id) 或 (None, None),用于持久化 user_memory_*。""" + if self._persist_scope_cache is None: + from 
app.services.persistent_memory_service import parse_memory_scope + + self._persist_scope_cache = parse_memory_scope(self.workflow_id) + return self._persist_scope_cache + + def _looks_like_vector_upsert_payload(self, d: Any) -> bool: + """判断是否为向量写入/upsert 返回的元数据(非用户可见话术)。""" + if not isinstance(d, dict): + return False + st = d.get("status") + if st in ("upserted", "inserted", "updated", "ok") and d.get("id") is not None: + return True + i = d.get("id") + if isinstance(i, str) and i.startswith("doc_") and st: + return True + return False + + def _parse_zhini_final_json_dict(self, text: str) -> Optional[Tuple[Dict[str, Any], str]]: + """ + 解析知你类 LLM 输出:整段为单行/多行合法 JSON,或「自然语言 + 最后一行单行 JSON」。 + 返回 (解析出的 dict, 末行 JSON 之前的前缀文本);整段仅为 JSON 时前缀为 ""。 + 解析失败返回 None。 + """ + if not isinstance(text, str): + return None + s = text.strip() + if not s: + return None + if s.startswith("{"): + try: + o = json.loads(s) + if isinstance(o, dict): + return (o, "") + except Exception: + pass + last_nl = s.rfind("\n") + if last_nl < 0: + return None + prefix = s[:last_nl].rstrip() + last_line = s[last_nl + 1 :].strip() + if not last_line.startswith("{"): + return None + try: + o = json.loads(last_line) + if isinstance(o, dict): + return (o, prefix) + except Exception: + pass + return None + + def _parse_reply_from_llm_value(self, out: Any) -> Optional[str]: + """从 LLM 节点输出(JSON 字符串、纯文本或 dict)中取出可展示回复。""" + if out is None: + return None + if isinstance(out, dict): + r = out.get("reply") + if isinstance(r, str) and r.strip(): + return r.strip() + try: + return json.dumps(out, ensure_ascii=False) + except Exception: + return str(out) + if isinstance(out, str): + s = out.strip() + if not s: + return None + zj = self._parse_zhini_final_json_dict(s) + if zj is not None: + obj, prefix = zj + r = obj.get("reply") + if isinstance(r, str) and r.strip(): + return r.strip() + if prefix != "": + return prefix + return s + if s.startswith("{"): + try: + obj = json.loads(s) + except Exception: + return 
s + if isinstance(obj, dict): + r = obj.get("reply") + if isinstance(r, str) and r.strip(): + return r.strip() + return s + return s + return None + + def _extract_reply_from_llm_node_outputs(self) -> Optional[str]: + """遍历已执行节点,优先从 llm-unified / llm 节点解析 reply。""" + items = list(self.node_outputs.items()) + + def sort_key(item: tuple) -> tuple: + nid = item[0].lower() + if "llm-unified" in nid: + return (0, nid) + if "llm" in nid: + return (1, nid) + return (2, nid) + + for node_id, out in sorted(items, key=sort_key): + node = self.nodes.get(node_id) or {} + ntype = (node.get("type") or "").lower() + if ntype == "llm" or "llm" in node_id.lower(): + got = self._parse_reply_from_llm_value(out) + if got: + return got + for _, out in items: + got = self._parse_reply_from_llm_value(out) + if got: + return got + return None + + def _looks_like_unresolved_template(self, s: Any) -> bool: + """是否为未替换的 {{...}} 占位串(含 {{memory.xxx}} 等)。""" + if not isinstance(s, str): + return False + t = s.strip() + if not t: + return False + return bool(re.fullmatch(r"\{\{\s*[\w.]+\s*\}\}", t)) + + def _coalesce_final_user_text(self) -> Optional[str]: + """从已执行 LLM 节点取可展示文本,供替换滞留在 End/result 上的模板字面量。""" + fb = self._extract_reply_from_llm_node_outputs() + if isinstance(fb, str) and fb.strip() and not self._looks_like_unresolved_template(fb): + return fb.strip() + + ranked: List[Tuple[int, str, str]] = [] + for node_id, out in self.node_outputs.items(): + node = self.nodes.get(node_id) or {} + ntype = (node.get("type") or "").lower() + if ntype != "llm" and "llm" not in node_id.lower(): + continue + if not isinstance(out, str): + continue + s = out.strip() + if not s or self._looks_like_unresolved_template(s): + continue + pri = 0 if "llm-unified" in node_id else 1 + ranked.append((pri, node_id, s)) + ranked.sort(key=lambda x: (x[0], x[1])) + for _, _, s in ranked: + return s + return None + + def _replace_if_template_placeholder(self, final_output: Any) -> Any: + """若为未解析模板串,替换为 LLM 
节点正文(字符串)。""" + if not isinstance(final_output, str): + return final_output + if not self._looks_like_unresolved_template(final_output): + return final_output + alt = self._coalesce_final_user_text() + if alt: + logger.info( + "[rjb] 模板占位符「%s」已替换为 LLM 输出(节选)", + final_output.strip()[:48], + ) + return alt + return final_output + + def _extract_user_profile_from_llm_node_outputs(self) -> Optional[Dict[str, Any]]: + """从已执行 LLM 节点 JSON 输出中取 user_profile(用于缓存合并)。""" + for node_id, out in self.node_outputs.items(): + if "llm" not in node_id.lower(): + continue + if not isinstance(out, str): + continue + zj = self._parse_zhini_final_json_dict(out.strip()) + if zj is None: + continue + obj, _ = zj + up = obj.get("user_profile") + if isinstance(up, dict): + return dict(up) + return None + + def _memory_needs_backfill(self, mem: Any) -> bool: + """上游若传了 memory: {} 或仅占位空对象,应允许从 cache-query 等节点补全。""" + if mem is None: + return True + if not isinstance(mem, dict): + return True + if not mem: + return True + up = mem.get("user_profile") + hist = mem.get("conversation_history") + ctx = mem.get("context") + has_up = isinstance(up, dict) and bool(up) + has_hist = isinstance(hist, list) and len(hist) > 0 + has_ctx = isinstance(ctx, dict) and bool(ctx) + return not (has_up or has_hist or has_ctx) + + def _extract_user_message_text(self, input_data: Any) -> str: + """ + 从节点输入中提取用户当前轮发言。 + 需与 LLM 节点 user_query 提取路径一致:带 sourceHandle 时 query 常在 right/嵌套 input 下, + 否则姓名补全与记忆写入会拿不到原文。 + """ + if not isinstance(input_data, dict): + return "" + nested_input = input_data.get("input") + if isinstance(nested_input, dict): + for key in ("query", "input", "text", "message", "content", "user_input", "USER_INPUT"): + v = nested_input.get(key) + if isinstance(v, str) and v.strip(): + return v + for key in ("query", "input", "text", "message", "content", "user_input", "USER_INPUT"): + if key not in input_data: + continue + value = input_data[key] + if isinstance(value, str) and value.strip(): + 
return value + if isinstance(value, dict): + for sub_key in ("query", "input", "text", "message", "content", "user_input", "USER_INPUT"): + sv = value.get(sub_key) + if isinstance(sv, str) and sv.strip(): + return sv + for bucket in ("right", "left", "output", "data"): + b = input_data.get(bucket) + if isinstance(b, dict): + for key in ("query", "USER_INPUT", "user_input", "input", "text", "message", "content"): + v = b.get(key) + if isinstance(v, str) and v.strip(): + return v + return "" + + def _enrich_llm_json_user_profile(self, result: str, input_data: Any) -> str: + """ + 若 LLM 最终为单行 JSON 或「前文 + 末行 JSON」,但未在 user_profile 中写入 name, + 则从上游记忆或用户输入补全,便于 Cache 合并与多轮记住姓名。 + 工具模式下常见「多行说明 + 末行 JSON」,必须解析末行,否则无法合并 user_profile。 + """ + if not isinstance(result, str) or not result.strip(): + return result + zj = self._parse_zhini_final_json_dict(result.strip()) + if zj is None: + return result + obj, prefix = zj + if not isinstance(obj, dict): + return result + up = obj.get("user_profile") + if not isinstance(up, dict): + up = {} + obj["user_profile"] = up + if up.get("name"): + new_line = json.dumps(obj, ensure_ascii=False) + return f"{prefix}\n{new_line}" if prefix != "" else new_line + # 上游 Cache 已合并的用户画像(顶层或 memory.user_profile),补进 JSON,避免模型漏写导致「失忆」 + if isinstance(input_data, dict): + stored_profile = None + if isinstance(input_data.get("user_profile"), dict) and input_data["user_profile"]: + stored_profile = input_data["user_profile"] + elif isinstance(input_data.get("memory"), dict): + mu = input_data["memory"].get("user_profile") + if isinstance(mu, dict) and mu: + stored_profile = mu + if stored_profile: + merged = {**stored_profile, **up} + obj["user_profile"] = merged + up = merged + if up.get("name"): + new_line = json.dumps(obj, ensure_ascii=False) + return f"{prefix}\n{new_line}" if prefix != "" else new_line + q = self._extract_user_message_text(input_data) + if not q: + new_line = json.dumps(obj, ensure_ascii=False) + return f"{prefix}\n{new_line}" if 
prefix != "" else new_line + for pat in ( + r"我叫\s*([^\s,。!?,.!?]{2,32})", + r"我的名字?(?:是|叫)?\s*([^\s,。!?,.!?]{2,32})", + r"(?:称呼我|可以叫我)\s*([^\s,。!?,.!?]{2,32})", + ): + m = re.search(pat, q) + if m: + name = m.group(1).strip().strip(",。!?,.!?") + if not name or len(name) < 2: + continue + if "什么" in name or name in ("谁", "哪位", "什么人"): + continue + up["name"] = name + new_line = json.dumps(obj, ensure_ascii=False) + return f"{prefix}\n{new_line}" if prefix != "" else new_line + new_line = json.dumps(obj, ensure_ascii=False) + return f"{prefix}\n{new_line}" if prefix != "" else new_line + + def _resolve_end_output_if_vector_metadata(self, final_output: Any, input_data: Any) -> Any: + """ + End 节点误接「写入向量库」等上游时,对外会变成 upsert 元数据。 + 若检测到该情况,则改为 LLM 输出中的 reply 文本。 + """ + upsert_like = False + if self._looks_like_vector_upsert_payload(input_data): + upsert_like = True + elif isinstance(input_data, str): + t = input_data.strip() + if t.startswith("{") and "upserted" in t: + try: + if self._looks_like_vector_upsert_payload(json.loads(t)): + upsert_like = True + except Exception: + pass + if not upsert_like and isinstance(final_output, dict): + upsert_like = self._looks_like_vector_upsert_payload(final_output) + if not upsert_like and isinstance(final_output, str): + t = final_output.strip() + if t.startswith("{") and "upserted" in t: + try: + if self._looks_like_vector_upsert_payload(json.loads(t)): + upsert_like = True + except Exception: + pass + if not upsert_like: + return final_output + reply = self._extract_reply_from_llm_node_outputs() + if reply: + logger.info( + "[rjb] End 节点上游为向量写入元数据,已替换为 LLM reply 长度=%s", + len(reply), + ) + return reply + return final_output def build_execution_graph(self, active_edges: Optional[List[Dict[str, Any]]] = None) -> List[str]: """ @@ -95,7 +485,23 @@ class WorkflowEngine: self.execution_graph = result return result - + + def _forward_reachable_nodes(self, active_edges: List[Dict[str, Any]]) -> set: + """从所有 Start 沿 active_edges 
正向可达的节点。用于互斥分支汇合:只要求「当前图上可达」的前驱已执行。""" + start_ids = [n["id"] for n in self.nodes.values() if n.get("type") == "start"] + graph: Dict[str, List[str]] = defaultdict(list) + for e in active_edges: + graph[e["source"]].append(e["target"]) + seen = set(start_ids) + q = deque(start_ids) + while q: + u = q.popleft() + for v in graph.get(u, ()): + if v not in seen: + seen.add(v) + q.append(v) + return seen + def get_node_input(self, node_id: str, node_outputs: Dict[str, Any], active_edges: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]: """ 获取节点的输入数据 @@ -159,7 +565,7 @@ class WorkflowEngine: node_type = node.get('type') # 对于LLM节点和cache节点(特别是cache-update),需要memory字段 - if node_type in ['llm', 'cache'] and 'memory' not in input_data: + if node_type in ['llm', 'cache'] and self._memory_needs_backfill(input_data.get('memory')): # 从所有已执行的节点中查找memory字段 for executed_node_id, node_output in self.node_outputs.items(): if isinstance(node_output, dict): @@ -250,6 +656,27 @@ class WorkflowEngine: logger.debug(f"[rjb] 从节点 {node_id_key} 的output中获取requirement_analysis") break + # 会话身份:带 sourceHandle 的边会把上游输出放在 input_data[handle] 下,顶层可能缺少 user_id, + # Cache 键 user_memory_{{user_id}} 会退化为 default,Redis 跨请求记忆无法按用户隔离。 + if isinstance(input_data, dict) and not input_data.get('user_id') and not input_data.get('USER_ID'): + for nid, nmeta in self.nodes.items(): + if nmeta.get('type') == 'start': + st = node_outputs.get(nid) + if isinstance(st, dict): + uid = st.get('user_id') if st.get('user_id') is not None else st.get('USER_ID') + if uid is not None and str(uid).strip() != '': + input_data['user_id'] = uid + logger.info(f"[rjb] 从 Start 节点 {nid} 提升 user_id 到节点 {node_id} 输入顶层") + break + if not input_data.get('user_id') and not input_data.get('USER_ID'): + for v in input_data.values(): + if isinstance(v, dict): + uid = v.get('user_id') if v.get('user_id') is not None else v.get('USER_ID') + if uid is not None and str(uid).strip() != '': + input_data['user_id'] = uid + 
logger.info(f"[rjb] 从嵌套上游输入提升 user_id 到节点 {node_id} 输入顶层") + break + logger.debug(f"[rjb] 节点输入结果: node_id={node_id}, input_data={input_data}") return input_data @@ -299,7 +726,220 @@ class WorkflowEngine: return None return result - + + def _resolve_llm_prompt_placeholder(self, input_data: Dict[str, Any], var_path: str) -> Any: + """ + 解析 LLM 提示词中的 {{path}}。 + Cache get 常把 user_profile、conversation_history 等合并到 input_data 顶层且无 memory 包裹, + 仅用 memory.user_profile 会取不到;回退到顶层同名字段或 memory 子对象。 + """ + v = self._get_nested_value(input_data, var_path) + if v is not None: + return v + if not isinstance(input_data, dict) or not var_path.startswith("memory."): + return None + tail = var_path[7:] + if not tail: + return input_data.get("memory") + alt = self._get_nested_value(input_data, tail) + if alt is not None: + return alt + mem = input_data.get("memory") + if isinstance(mem, dict): + inner = self._get_nested_value(mem, tail) + if inner is not None: + return inner + if tail == "assistant_display_name": + ctx = mem.get("context") + if isinstance(ctx, dict): + n = ctx.get("assistant_display_name") + if n is not None: + return n + if tail == "assistant_display_name": + ctx = input_data.get("context") + if isinstance(ctx, dict): + return ctx.get("assistant_display_name") + return None + + def _resolve_vector_db_query_embedding( + self, input_data: Any, query_vector_config: Any + ) -> Optional[List[Any]]: + """ + 从节点配置的 query_vector 路径及上游合并结果中解析查询向量。 + 上游 embedding 可能挂在 right/left/output、或 HTTP 返回的 data[0].embedding 等路径下。 + 无法解析时返回 None(search 应降级为空结果,避免整图失败)。 + """ + + def _is_numeric_vector(v: Any) -> bool: + if v is None: + return False + if isinstance(v, tuple): + v = list(v) + if not isinstance(v, list) or len(v) == 0: + return False + return isinstance(v[0], (int, float)) + + def _as_float_list(v: Any) -> Optional[List[Any]]: + if isinstance(v, tuple): + v = list(v) + if _is_numeric_vector(v): + return v + return None + + def _deep_find_embedding(obj: Any, depth: int = 
0) -> Optional[List[Any]]: + """在嵌套 dict/list 中查找第一个数值向量 embedding。""" + if depth > 8 or obj is None: + return None + if isinstance(obj, dict): + for key in ("embedding", "vector", "query_embedding"): + got = _as_float_list(obj.get(key)) + if got is not None: + return got + for vv in obj.values(): + got = _deep_find_embedding(vv, depth + 1) + if got is not None: + return got + elif isinstance(obj, list) and obj: + if isinstance(obj[0], dict): + for item in obj: + got = _deep_find_embedding(item, depth + 1) + if got is not None: + return got + return None + + query_vec: Any = None + if isinstance(query_vector_config, str): + path = query_vector_config.replace("{", "").replace("}", "").strip() + if path and isinstance(input_data, dict): + query_vec = self._get_nested_value(input_data, path) + elif isinstance(query_vector_config, list): + query_vec = query_vector_config + + # 路径指到 JSON 字符串(部分 merge 会把数组序列化) + if isinstance(query_vec, str) and query_vec.strip().startswith("["): + try: + parsed = json.loads(query_vec) + pv = _as_float_list(parsed) + if pv is not None: + query_vec = pv + except Exception: + pass + + if not _is_numeric_vector(query_vec) and isinstance(input_data, dict): + for k in ("embedding", "vector", "query_embedding", "query_vector"): + v = input_data.get(k) + pv = _as_float_list(v) + if pv is not None: + query_vec = pv + break + if not _is_numeric_vector(query_vec): + # 合并节点常把一侧整条输出放在 right / left / output(向量本身即列表) + for path in ( + "right", + "left", + "output", + "right.embedding", + "left.embedding", + "output.embedding", + "right.output.embedding", + "left.output.embedding", + "data.embedding", + "output.data[0].embedding", + "body.data[0].embedding", + "result.embedding", + "response.embedding", + ): + v = self._get_nested_value(input_data, path) + pv = _as_float_list(v) + if pv is not None: + query_vec = pv + break + if isinstance(v, dict): + inner = v.get("embedding") or v.get("vector") + pv = _as_float_list(inner) + if pv is not None: + 
query_vec = pv + break + # OpenAI 风格:data 数组 + if not _is_numeric_vector(query_vec): + data = input_data.get("data") + if isinstance(data, list) and data: + first = data[0] + if isinstance(first, dict): + emb = first.get("embedding") + pv = _as_float_list(emb) + if pv is not None: + query_vec = pv + + if not _is_numeric_vector(query_vec): + found = _deep_find_embedding(input_data, 0) + if found is not None: + query_vec = found + + if _is_numeric_vector(query_vec): + return list(query_vec) if isinstance(query_vec, tuple) else query_vec + return None + + def _resolve_brace_template_var(self, expanded_input: Dict[str, Any], var_name: str) -> Any: + """ + 解析 transform mapping 中的 {{var_name}}。 + LLM 输出常挂在 sourceHandle=right;历史配置误用 {{output}} 时这里回退到 right/reply 等,避免字面量 {{...}} 流入 json-parse。 + """ + + def _not_placeholder(val: Any) -> bool: + if val is None or val == "": + return False + if isinstance(val, str): + t = val.strip() + if len(t) >= 4 and t.startswith("{{") and t.endswith("}}"): + return False + return True + + v = self._get_nested_value(expanded_input, var_name) + if _not_placeholder(v): + return v + r = expanded_input.get("right") + if var_name == "output": + for alt in ("reply", "result", "data", "content", "text"): + x = self._get_nested_value(expanded_input, alt) + if _not_placeholder(x): + return x + if isinstance(r, str) and r.strip() and _not_placeholder(r): + return r + if isinstance(r, dict): + for alt in ("reply", "output", "data", "result", "content"): + x = r.get(alt) + if _not_placeholder(x): + return x + fb = self._extract_reply_from_llm_node_outputs() + if fb: + return fb + if var_name == "reply": + if isinstance(r, dict): + x = r.get("reply") + if _not_placeholder(x): + return x + if isinstance(r, str) and r.strip().startswith("{"): + try: + obj = json.loads(r) + if isinstance(obj, dict) and obj.get("reply") is not None: + return obj.get("reply") + except Exception: + pass + fb = self._extract_reply_from_llm_node_outputs() + if fb: + return 
fb + if var_name == "user_profile": + if isinstance(r, dict): + x = r.get("user_profile") + if isinstance(x, dict): + return x + if var_name == "result": + fb = self._extract_reply_from_llm_node_outputs() + if fb: + return fb + return None + async def _execute_loop_body(self, loop_node_id: str, loop_input: Dict[str, Any], iteration_index: int) -> Dict[str, Any]: """ 执行循环体 @@ -471,7 +1111,6 @@ class WorkflowEngine: has_unfilled_variables = False has_any_placeholder = False - import re # 检查是否有任何占位符 has_any_placeholder = bool(re.search(r'\{\{?\w+\}?\}', prompt)) @@ -479,8 +1118,8 @@ class WorkflowEngine: # 支持嵌套路径,如 {{memory.conversation_history}} double_brace_vars = re.findall(r'\{\{([^}]+)\}\}', prompt) for var_path in double_brace_vars: - # 尝试从input_data中获取值(支持嵌套路径) - value = self._get_nested_value(input_data, var_path) + # 尝试从input_data中获取值(支持嵌套路径;含 memory.* 与 Cache 顶层合并对齐) + value = self._resolve_llm_prompt_placeholder(input_data, var_path) # 如果变量未找到,尝试常见的别名映射 if value is None: @@ -743,6 +1382,18 @@ class WorkflowEngine: # 从注册表加载工具定义 tools = tool_registry.get_tools_by_names(tools_config) logger.info(f"[rjb] LLM节点启用工具调用: {len(tools)} 个工具, 工具列表: {tools_config}") + if not tools: + logger.warning( + "[rjb] LLM 已 enable_tools 但当前进程 tool_registry 中 0 个匹配 schema," + "将无法发起 function calling(常见于 Celery Worker 未加载 tools_bootstrap)。配置=%s", + tools_config, + ) + elif len(tools) < len(tools_config): + missing = [n for n in tools_config if not tool_registry.get_tool_schema(n)] + logger.warning( + "[rjb] LLM 工具部分缺失 schema,缺失=%s(可动手能力不完整)", + missing, + ) # 调用LLM服务 try: @@ -752,6 +1403,9 @@ class WorkflowEngine: # 根据是否启用工具选择不同的调用方式 if tools: + _tool_choice = node_data.get("tool_choice") + if not (isinstance(_tool_choice, str) and _tool_choice.strip()): + _tool_choice = None result = await llm_service.call_llm_with_tools( prompt=prompt, tools=tools, @@ -759,8 +1413,10 @@ class WorkflowEngine: model=model, temperature=temperature, max_tokens=max_tokens, - 
execution_logger=self.logger + execution_logger=self.logger, + tool_choice=_tool_choice, ) + result = self._enrich_llm_json_user_profile(result, input_data) else: result = await llm_service.call_llm( prompt=prompt, @@ -770,6 +1426,7 @@ class WorkflowEngine: max_tokens=max_tokens # 不传递 api_key 和 base_url,使用系统默认配置 ) + result = self._enrich_llm_json_user_profile(result, input_data) exec_result = {'output': result, 'status': 'success'} if self.logger: @@ -789,28 +1446,40 @@ class WorkflowEngine: } elif node_type == 'condition': - # 条件节点:判断分支 + # 条件节点:判断分支(output 必须透传上游 dict,否则 sourceHandle true/false 下游只收到布尔值,丢失 reply/memory) condition = node.get('data', {}).get('condition', '') + def _condition_passthrough(ok: bool, failed: bool = False) -> dict: + base = input_data if isinstance(input_data, dict) else {} + out = base.copy() + out['_condition_result'] = ok + if failed: + out['_condition_error'] = True + return out + if not condition: - # 如果没有条件表达式,默认返回False - return { - 'output': False, + exec_result = { + 'output': _condition_passthrough(False), 'status': 'success', 'branch': 'false' } + if self.logger: + duration = int((time.time() - start_time) * 1000) + self.logger.log_node_complete(node_id, node_type, {'result': False, 'branch': 'false'}, duration) + return exec_result # 使用条件解析器评估表达式 try: result = condition_parser.evaluate_condition(condition, input_data) + ok = bool(result) exec_result = { - 'output': result, + 'output': _condition_passthrough(ok), 'status': 'success', - 'branch': 'true' if result else 'false' + 'branch': 'true' if ok else 'false' } if self.logger: duration = int((time.time() - start_time) * 1000) - self.logger.log_node_complete(node_id, node_type, {'result': result, 'branch': exec_result['branch']}, duration) + self.logger.log_node_complete(node_id, node_type, {'result': ok, 'branch': exec_result['branch']}, duration) return exec_result except Exception as e: # 条件评估失败 @@ -818,7 +1487,7 @@ class WorkflowEngine: duration = int((time.time() - 
start_time) * 1000) self.logger.log_node_error(node_id, node_type, e, duration) return { - 'output': False, + 'output': _condition_passthrough(False, failed=True), 'status': 'failed', 'error': f'条件评估失败: {str(e)}', 'branch': 'false' @@ -839,9 +1508,37 @@ class WorkflowEngine: if 'output' in input_data and isinstance(input_data['output'], dict): # 将output中的内容展开到顶层,但保留output字段 expanded_input.update(input_data['output']) + # 条件分支边常用 sourceHandle true/false,载荷在子 dict 中,需展开到顶层 + for _branch_key in ('true', 'false'): + _bp = expanded_input.get(_branch_key) + if isinstance(_bp, dict): + expanded_input.update(_bp) + for _k in ('true', 'false', '_condition_result', '_condition_error'): + expanded_input.pop(_k, None) + # 展开 right:merge / json-parse 后 reply、user_profile 常在 right 或嵌套 JSON 字符串中 + if isinstance(expanded_input.get('right'), dict): + expanded_input.update(expanded_input['right']) + elif isinstance(expanded_input.get('right'), str): + rs = expanded_input['right'].strip() + if rs.startswith('{'): + try: + _rj = json.loads(rs) + if isinstance(_rj, dict): + expanded_input.update(_rj) + except Exception: + pass + _r = expanded_input.get('right') + if isinstance(_r, dict) and isinstance(_r.get('right'), str): + _inner = _r['right'].strip() + if _inner.startswith('{'): + try: + _rj2 = json.loads(_inner) + if isinstance(_rj2, dict): + expanded_input.update(_rj2) + except Exception: + pass processed_mapping = {} - import re for target_key, source_expr in mapping.items(): if isinstance(source_expr, str): # 支持{{variable}}格式 @@ -850,8 +1547,9 @@ class WorkflowEngine: # 从expanded_input中获取变量值 var_value = None for var_name in double_brace_vars: - # 尝试从expanded_input中获取,支持嵌套路径 - var_value = self._get_nested_value(expanded_input, var_name) + var_value = self._resolve_brace_template_var( + expanded_input, var_name + ) if var_value is not None: break @@ -863,7 +1561,9 @@ class WorkflowEngine: # 多个变量,替换表达式 processed_expr = source_expr for var_name in double_brace_vars: - var_val = 
self._get_nested_value(expanded_input, var_name) + var_val = self._resolve_brace_template_var( + expanded_input, var_name + ) if var_val is not None: replacement = json_module.dumps(var_val, ensure_ascii=False) if isinstance(var_val, (dict, list)) else str(var_val) processed_expr = processed_expr.replace(f'{{{{{var_name}}}}}', replacement) @@ -1048,7 +1748,6 @@ class WorkflowEngine: timeout = node_data.get('timeout', 30) # 如果URL、headers、params、body中包含变量,从input_data中替换 - import re def replace_variables(text: str, data: Dict[str, Any]) -> str: """替换字符串中的变量占位符""" if not isinstance(text, str): @@ -1163,7 +1862,6 @@ class WorkflowEngine: # 如果SQL中包含变量,从input_data中替换 if sql and isinstance(sql, str): - import re def replace_sql_vars(text: str, data: Dict[str, Any]) -> str: pattern = r'\{([^}]+)\}|\$\{([^}]+)\}' def replacer(match): @@ -1272,7 +1970,6 @@ class WorkflowEngine: encoding = node_data.get('encoding', 'utf-8') # 替换文件路径和内容中的变量 - import re def replace_variables(text: str, data: Dict[str, Any]) -> str: """替换字符串中的变量占位符""" if not isinstance(text, str): @@ -1435,7 +2132,6 @@ class WorkflowEngine: timeout = node_data.get('timeout', 30) # 如果URL、headers、body中包含变量,从input_data中替换 - import re def replace_variables(text: str, data: Dict[str, Any]) -> str: """替换字符串中的变量占位符""" if not isinstance(text, str): @@ -1582,7 +2278,6 @@ class WorkflowEngine: attachments = node_data.get('attachments', []) # 附件列表 # 替换变量 - import re def replace_variables(text: str, data: Dict[str, Any]) -> str: """替换字符串中的变量占位符""" if not isinstance(text, str): @@ -1719,7 +2414,6 @@ class WorkflowEngine: queue_type = node_data.get('queue_type', 'rabbitmq') # rabbitmq/kafka # 替换变量 - import re def replace_variables(text: str, data: Dict[str, Any]) -> str: """替换字符串中的变量占位符""" if not isinstance(text, str): @@ -2203,7 +2897,6 @@ class WorkflowEngine: elif operation == 'extract': # 使用正则表达式提取 if regex: - import re matches = re.findall(regex, input_text) result = matches if len(matches) > 1 else (matches[0] if matches 
else '') else: @@ -2214,7 +2907,6 @@ class WorkflowEngine: old_text = node_data.get('old_text', '') new_text = node_data.get('new_text', '') if regex: - import re result = re.sub(regex, new_text, input_text) else: result = input_text.replace(old_text, new_text) @@ -2286,7 +2978,6 @@ class WorkflowEngine: # 替换key中的变量 if isinstance(input_data, dict): # 首先处理 {{variable}} 格式 - import re double_brace_vars = re.findall(r'\{\{(\w+)\}\}', key) for var_name in double_brace_vars: if var_name in input_data: @@ -2343,6 +3034,24 @@ class WorkflowEngine: result = self._cache_store[key] cache_hit = True + # 永久记忆:MySQL 与 Redis 合并(Redis 过期或冷启动时仍可从 DB 恢复) + try: + from app.services import persistent_memory_service as _pmem + + if ( + _pmem.persist_enabled() + and self.db + and _pmem.is_user_memory_redis_key(key) + ): + sk = _pmem.session_key_from_user_memory_key(key) + skind, sid = self._get_persist_scope() + if sk and skind and sid: + db_payload = _pmem.load_persistent_memory(self.db, skind, sid, sk) + if db_payload is not None: + result = _pmem.merge_memory_payloads(db_payload, result) + except Exception as _pe: + logger.warning(f"加载持久记忆失败: {_pe}") + # 如果缓存未命中,使用default_value if result is None: try: @@ -2369,7 +3078,6 @@ class WorkflowEngine: # 处理value模板 if value_template: # 处理模板语法 {{variable}} - import re value_str = value_template # 替换 {{variable}} 格式的变量 @@ -2428,23 +3136,53 @@ class WorkflowEngine: if not isinstance(memory, dict): memory = {} + # 合并本轮 LLM 的 user_profile_update,便于多轮记住姓名等信息 + upd = input_data.get('user_profile_update') + if isinstance(upd, str) and upd.strip().startswith('{'): + try: + upd = json_module.loads(upd) + except Exception: + upd = {} + if not isinstance(upd, dict): + upd = {} + if not upd.get("name"): + prof = self._extract_user_profile_from_llm_node_outputs() + if prof: + upd = {**upd, **prof} + base_up = memory.get('user_profile') or {} + if not isinstance(base_up, dict): + base_up = {} + memory['user_profile'] = {**base_up, **upd} + # 
确保memory中有必要的字段 if 'conversation_history' not in memory: memory['conversation_history'] = [] - if 'user_profile' not in memory: - memory['user_profile'] = {} if 'context' not in memory: memory['context'] = {} - # 获取user_input:优先从query或USER_INPUT获取 - user_input = input_data.get('query') or input_data.get('USER_INPUT') or input_data.get('user_input') or '' + # 获取 user_input(与 LLM 一致,支持 right 等嵌套) + user_input = self._extract_user_message_text(input_data) - # 获取output:从right字段获取,如果是dict则提取right子字段 + # 获取助手回复文本:避免 {{reply}} 占位或未解析的 JSON 串写入记忆 output = input_data.get('right', '') if isinstance(output, dict): - output = output.get('right', '') or output.get('content', '') or str(output) + output = output.get('reply') or output.get('right', '') or output.get('content', '') or str(output) + if isinstance(output, str) and output.strip().startswith('{'): + try: + _jo = json_module.loads(output) + if isinstance(_jo, dict) and _jo.get('reply'): + output = _jo['reply'] + except Exception: + pass if not output: output = '' + os = str(output).strip() + if not os or os in ('{{reply}}', '{{right}}', '{{output}}') or ( + os.startswith('{{') and os.endswith('}}') + ): + reply_guess = self._extract_reply_from_llm_node_outputs() + if reply_guess: + output = reply_guess timestamp = datetime.now().isoformat() @@ -2473,10 +3211,10 @@ class WorkflowEngine: value = eval(value_str, {"__builtins__": {}}, safe_dict) logger.info(f"[rjb] Cache节点 {node_id} value模板执行成功,类型: {type(value)}") - # 确保conversation_history只保留最近的20条(性能优化) + # 确保 conversation_history 只保留最近若干条(性能优化,可在 Cache 节点 data.max_history_length 配置) if isinstance(value, dict) and 'conversation_history' in value: if isinstance(value['conversation_history'], list): - max_history_length = 20 + max_history_length = int(node_data.get('max_history_length', 20)) if len(value['conversation_history']) > max_history_length: value['conversation_history'] = value['conversation_history'][-max_history_length:] logger.info(f"[rjb] 对话历史已截断,保留最近 
{max_history_length} 条") @@ -2509,6 +3247,24 @@ class WorkflowEngine: except Exception as e: logger.warning(f"存储到Redis失败: {str(e)}") + # 永久记忆:写入 MySQL(与 user_memory_* 键一致) + try: + from app.services import persistent_memory_service as _pmem + + if ( + _pmem.persist_enabled() + and self.db + and _pmem.is_user_memory_redis_key(key) + and isinstance(value, dict) + ): + sk = _pmem.session_key_from_user_memory_key(key) + skind, sid = self._get_persist_scope() + if sk and skind and sid: + _pmem.save_persistent_memory(self.db, skind, sid, sk, value) + logger.info(f"[rjb] 已持久化记忆到数据库: scope={skind}:{sid}, session={sk[:48]}") + except Exception as _pse: + logger.warning(f"持久化记忆到数据库失败: {_pse}") + # 同时存储到内存缓存 self._cache_store[key] = value self._cache_timestamps[key] = time.time() @@ -2518,10 +3274,29 @@ class WorkflowEngine: elif operation == 'delete': # 删除缓存 + if use_redis and redis_client: + try: + redis_client.delete(key) + except Exception as _de: + logger.warning(f"从Redis删除缓存失败: {_de}") if key in self._cache_store: del self._cache_store[key] if key in self._cache_timestamps: del self._cache_timestamps[key] + try: + from app.services import persistent_memory_service as _pmem + + if ( + _pmem.persist_enabled() + and self.db + and _pmem.is_user_memory_redis_key(key) + ): + sk = _pmem.session_key_from_user_memory_key(key) + skind, sid = self._get_persist_scope() + if sk and skind and sid: + _pmem.delete_persistent_memory(self.db, skind, sid, sk) + except Exception as _pde: + logger.warning(f"删除持久记忆失败: {_pde}") exec_result = {'output': input_data, 'status': 'success'} elif operation == 'clear': @@ -2566,50 +3341,53 @@ class WorkflowEngine: if collection not in self._vector_store: self._vector_store[collection] = [] - # 获取查询向量 - if isinstance(query_vector, str): - # 尝试从input_data中获取 - query_vec = self._get_nested_value(input_data, query_vector.replace('{', '').replace('}', '')) + query_vec = self._resolve_vector_db_query_embedding(input_data, query_vector) + + # 
可选:仅检索当前会话用户的向量(metadata.user_id 与请求一致) + filter_uid = None + if isinstance(input_data, dict): + filter_uid = input_data.get("user_id") or input_data.get("USER_ID") + if filter_uid is not None: + filter_uid = str(filter_uid) + + results: List[Dict[str, Any]] = [] + if query_vec is None: + logger.warning( + "vector_db search: 未解析到查询向量,返回空检索(collection=%s)。" + "请检查上游 embedding 与 merge 字段;并确认 API/Celery 已重启加载最新引擎代码。", + collection, + ) + result = [] else: - query_vec = query_vector - - if not isinstance(query_vec, list): - # 如果输入数据包含embedding字段,使用它 - if isinstance(input_data, dict) and 'embedding' in input_data: - query_vec = input_data['embedding'] - else: - raise ValueError("无法获取查询向量") - - # 计算相似度并排序 - results = [] - for item in self._vector_store[collection]: - if 'vector' in item: - # 计算余弦相似度 - import math - vec1 = query_vec - vec2 = item['vector'] - if len(vec1) != len(vec2): - continue - - dot_product = sum(a * b for a, b in zip(vec1, vec2)) - magnitude1 = math.sqrt(sum(a * a for a in vec1)) - magnitude2 = math.sqrt(sum(a * a for a in vec2)) - - if magnitude1 == 0 or magnitude2 == 0: - similarity = 0 - else: - similarity = dot_product / (magnitude1 * magnitude2) - - results.append({ - 'id': item.get('id'), - 'text': item.get('text', ''), - 'metadata': item.get('metadata', {}), - 'similarity': similarity - }) - - # 按相似度排序并返回top_k - results.sort(key=lambda x: x['similarity'], reverse=True) - result = results[:top_k] + # 计算相似度并排序 + for item in self._vector_store[collection]: + if 'vector' in item: + md = item.get("metadata") or {} + if filter_uid and md.get("user_id") not in (None, "", filter_uid): + continue + vec1 = query_vec + vec2 = item['vector'] + if len(vec1) != len(vec2): + continue + + dot_product = sum(a * b for a, b in zip(vec1, vec2)) + magnitude1 = math.sqrt(sum(a * a for a in vec1)) + magnitude2 = math.sqrt(sum(a * a for a in vec2)) + + if magnitude1 == 0 or magnitude2 == 0: + similarity = 0 + else: + similarity = dot_product / (magnitude1 * 
magnitude2) + + results.append({ + 'id': item.get('id'), + 'text': item.get('text', ''), + 'metadata': item.get('metadata', {}), + 'similarity': similarity + }) + + results.sort(key=lambda x: x['similarity'], reverse=True) + result = results[:top_k] elif operation == 'upsert': # 插入或更新向量 @@ -3704,9 +4482,10 @@ class WorkflowEngine: timeout = node_data.get('timeout', 30) try: if language.lower() == 'python': - # 受限执行环境 + # 受限执行环境(禁止无 __builtins__,否则 isinstance 等不可用) local_vars = {'input_data': input_data, 'result': None} - exec(code, {'__builtins__': {}}, local_vars) # 注意:生产环境需更严格沙箱 + _code_globs = {'__builtins__': _CODE_NODE_SAFE_BUILTINS, 'hashlib': hashlib, 're': re} + exec(code, _code_globs, local_vars) result = local_vars.get('result', local_vars.get('output', input_data)) elif language.lower() == 'javascript': # JS 执行需要外部运行时,这里仅占位 @@ -3832,12 +4611,35 @@ class WorkflowEngine: elif node_type == 'output' or node_type == 'end': # 输出节点:返回最终结果 + import json as json_module # 读取节点配置中的输出格式设置 node_data = node.get('data', {}) output_format = node_data.get('output_format', 'text') # 默认纯文本 logger.debug(f"[rjb] End节点处理: node_id={node_id}, output_format={output_format}, input_data={input_data}, input_data type={type(input_data)}") final_output = input_data + # 上游常用 sourceHandle=right:整包在 input_data['right']。若不展开,仅余 user_id 等顶层字段会被当成最终文本 + if isinstance(input_data, dict): + _ex = input_data.copy() + for _branch_key in ('true', 'false'): + _bp = _ex.get(_branch_key) + if isinstance(_bp, dict): + _ex.update(_bp) + for _k in ('true', 'false', '_condition_result', '_condition_error'): + _ex.pop(_k, None) + if isinstance(_ex.get('right'), dict): + _ex.update(_ex['right']) + elif isinstance(_ex.get('right'), str): + _rs = _ex['right'].strip() + if _rs.startswith('{'): + try: + _rj = json_module.loads(_rs) + if isinstance(_rj, dict): + _ex.update(_rj) + except Exception: + pass + input_data = _ex + final_output = input_data # 如果配置为JSON格式,直接返回原始数据(或格式化的JSON) if output_format == 
'json': @@ -3927,7 +4729,6 @@ class WorkflowEngine: # 清理输出文本:移除常见的字段前缀(如 "input: ", "query: " 等) if isinstance(final_output, str): - import re # 移除行首的 "input: ", "query: ", "output: " 等前缀 lines = final_output.split('\n') cleaned_lines = [] @@ -3948,7 +4749,11 @@ class WorkflowEngine: final_output = re.sub(r'^(input|query):\s*', '', final_output, flags=re.IGNORECASE | re.MULTILINE) if not final_output.strip(): final_output = str(input_data) # 如果还是空,使用原始输入 + + if output_format != "json": + final_output = self._replace_if_template_placeholder(final_output) + final_output = self._resolve_end_output_if_vector_metadata(final_output, input_data) logger.debug(f"[rjb] End节点最终输出: output_format={output_format}, final_output={final_output[:100] if isinstance(final_output, str) else type(final_output)}") result = {'output': final_output, 'status': 'success'} if self.logger: @@ -3997,44 +4802,60 @@ class WorkflowEngine: self.node_outputs = {} active_edges = self.edges.copy() # 活跃的边列表 executed_nodes = set() # 已执行的节点 - + execution_sequence: List[str] = [] # 实际执行顺序(用于最终输出节点选择) + # 按拓扑顺序执行节点(动态构建执行图) results = {} - + while True: # 构建当前活跃的执行图 execution_order = self.build_execution_graph(active_edges) - logger.debug(f"[rjb] 当前执行图: {execution_order}, 活跃边数: {len(active_edges)}, 已执行节点: {executed_nodes}") - - # 找到下一个要执行的节点(未执行且入度为0) + forward_reachable = self._forward_reachable_nodes(active_edges) + order_pos = {nid: i for i, nid in enumerate(execution_order)} + # 拓扑序可能不含「双前驱但仅一条分支可达」的汇合点,故候选为全部未执行节点并按 execution_order 优先 + pending_ids = sorted( + (nid for nid in self.nodes if nid not in executed_nodes), + key=lambda x: (order_pos.get(x, 1_000_000), x), + ) + logger.debug( + f"[rjb] 当前执行图: {execution_order}, 活跃边数: {len(active_edges)}, 已执行节点: {executed_nodes}" + ) + next_node_id = None - for node_id in execution_order: - if node_id not in executed_nodes: - # 检查所有前置节点是否已执行 - can_execute = True - incoming_edges = [e for e in active_edges if e['target'] == node_id] - if not incoming_edges: - # 
没有入边,可能是起始节点或孤立节点 - if node_id not in [n['id'] for n in self.nodes.values() if n.get('type') == 'start']: - # 不是起始节点,但有入边被过滤了,不应该执行 - logger.debug(f"[rjb] 节点 {node_id} 没有入边,跳过执行") - continue + for node_id in pending_ids: + can_execute = True + incoming_edges = [e for e in active_edges if e["target"] == node_id] + if not incoming_edges: + # 没有入边:仅允许 Start;孤立节点跳过 + if node_id not in [n["id"] for n in self.nodes.values() if n.get("type") == "start"]: + logger.debug(f"[rjb] 节点 {node_id} 没有入边,跳过执行") + continue + else: for edge in incoming_edges: - if edge['source'] not in executed_nodes: + src = edge["source"] + if src not in forward_reachable: + # 条件分支裁剪后不可达的前驱,不参与 gate(OR-join) + continue + if src not in executed_nodes: can_execute = False - logger.debug(f"[rjb] 节点 {node_id} 的前置节点 {edge['source']} 未执行,不能执行") + logger.debug( + f"[rjb] 节点 {node_id} 的前置节点 {src} 未执行,不能执行" + ) break - if can_execute: - next_node_id = node_id - logger.info(f"[rjb] 选择执行节点: {next_node_id}, 类型: {self.nodes[next_node_id].get('type')}, 入边数: {len(incoming_edges)}") - break - + if can_execute: + next_node_id = node_id + logger.info( + f"[rjb] 选择执行节点: {next_node_id}, 类型: {self.nodes[next_node_id].get('type')}, 入边数: {len(incoming_edges)}" + ) + break + if not next_node_id: break # 没有更多节点可执行 node = self.nodes[next_node_id] executed_nodes.add(next_node_id) - + execution_sequence.append(next_node_id) + # 调试:检查节点数据结构 if node.get('type') == 'llm': logger.debug(f"[rjb] 执行LLM节点: node_id={next_node_id}, node keys={list(node.keys())}, data keys={list(node.get('data', {}).keys()) if node.get('data') else []}") @@ -4192,19 +5013,40 @@ class WorkflowEngine: node_id=next_node_id ) - # 返回最终结果(最后一个执行的节点的输出) + # 返回最终结果:优先取 End 类型且无出边的节点,避免向量写入等侧链与 End 同为 sink 时 + # 因 executed_nodes 为 set 迭代顺序不确定而错误返回 upsert 元数据。 if executed_nodes: - # 找到最后一个节点(没有出边的节点) - last_node_id = None - for node_id in executed_nodes: - has_outgoing = any(edge['source'] == node_id for edge in active_edges) - if not has_outgoing: - last_node_id 
= node_id - break - - if not last_node_id: - # 如果没有找到,使用最后一个执行的节点 - last_node_id = list(executed_nodes)[-1] + sink_nodes = [ + nid + for nid in executed_nodes + if not any(edge["source"] == nid for edge in active_edges) + ] + + def _pick_latest_in_sequence(cands: List[str]) -> Optional[str]: + best: Optional[str] = None + best_pos = -1 + for nid in cands: + try: + pos = execution_sequence.index(nid) + except ValueError: + continue + if pos > best_pos: + best_pos = pos + best = nid + return best + + last_node_id: Optional[str] = None + end_sinks = [ + nid + for nid in sink_nodes + if self.nodes.get(nid, {}).get("type") == "end" + ] + if end_sinks: + last_node_id = _pick_latest_in_sequence(end_sinks) + if not last_node_id and sink_nodes: + last_node_id = _pick_latest_in_sequence(sink_nodes) + if not last_node_id and execution_sequence: + last_node_id = execution_sequence[-1] # 获取最终结果 final_output = self.node_outputs.get(last_node_id) @@ -4241,6 +5083,9 @@ class WorkflowEngine: else: final_output = str(final_output) + final_output = self._resolve_end_output_if_vector_metadata(final_output, final_output) + final_output = self._replace_if_template_placeholder(final_output) + final_result = { 'status': 'completed', 'result': final_output, diff --git a/backend/app/tasks/agent_tasks.py b/backend/app/tasks/agent_tasks.py index bd52b50..39418c5 100644 --- a/backend/app/tasks/agent_tasks.py +++ b/backend/app/tasks/agent_tasks.py @@ -2,6 +2,10 @@ Agent任务 """ from celery import Task +from app.core.tools_bootstrap import ensure_builtin_tools_registered + +ensure_builtin_tools_registered() + from app.core.celery_app import celery_app diff --git a/backend/app/tasks/workflow_tasks.py b/backend/app/tasks/workflow_tasks.py index 35e2dce..2677a84 100644 --- a/backend/app/tasks/workflow_tasks.py +++ b/backend/app/tasks/workflow_tasks.py @@ -2,6 +2,10 @@ 工作流任务 """ from celery import Task +from app.core.tools_bootstrap import ensure_builtin_tools_registered + 
+ensure_builtin_tools_registered() + from app.core.celery_app import celery_app from app.services.workflow_engine import WorkflowEngine from app.services.execution_logger import ExecutionLogger @@ -15,6 +19,17 @@ import asyncio import time +def _format_task_error(e: Exception) -> str: + """Celery 任务异常写入 DB 时的可读文案(HTTPException.detail 等)。""" + detail = getattr(e, "detail", None) + if isinstance(detail, str) and detail.strip(): + return detail + if detail is not None and detail != "": + return str(detail) + s = str(e).strip() + return s if s else repr(e) + + @celery_app.task(bind=True) def execute_workflow_task( self, @@ -87,14 +102,15 @@ def execute_workflow_task( execution_time = int((time.time() - start_time) * 1000) # 记录错误日志 + err_text = _format_task_error(e) if execution_logger: - execution_logger.error(f"工作流任务执行失败: {str(e)}", data={"error_type": type(e).__name__}) + execution_logger.error(f"工作流任务执行失败: {err_text}", data={"error_type": type(e).__name__}) # 更新执行记录为失败 execution = db.query(Execution).filter(Execution.id == execution_id).first() if execution: execution.status = "failed" - execution.error_message = str(e) + execution.error_message = err_text execution.execution_time = execution_time db.commit() diff --git a/backend/env.example b/backend/env.example index 97c9726..adc0f53 100644 --- a/backend/env.example +++ b/backend/env.example @@ -10,6 +10,14 @@ DATABASE_URL=mysql+pymysql://root:!Rjb12191@gz-cynosdbmysql-grp-d26pzce5.sql.ten # Redis配置 REDIS_URL=redis://localhost:6379/0 +# 会话记忆是否同时写入 MySQL(永久记忆);设为 false 则仅 Redis + 进程内缓存 +MEMORY_PERSIST_DB_ENABLED=true + +# 本地文件工具 file_read / file_write 允许访问的根目录(可选)。留空则默认为「backend 的上一级」即仓库根 +# LOCAL_FILE_TOOLS_ROOT=D:/aaa/aiagent +# LOCAL_FILE_READ_MAX_BYTES=2097152 +# LOCAL_FILE_WRITE_MAX_BYTES=2097152 + # CORS配置(多个地址用逗号分隔) CORS_ORIGINS=http://localhost:3000,http://127.0.0.1:3000,http://localhost:8038,http://101.43.95.130:8038 @@ -17,7 +25,7 @@ 
CORS_ORIGINS=http://localhost:3000,http://127.0.0.1:3000,http://localhost:8038,h OPENAI_API_KEY=your-openai-api-key OPENAI_BASE_URL=https://api.openai.com/v1 -# DeepSeek配置(可选) +# DeepSeek配置(工作流 LLM 节点若选 DeepSeek 则必填;否则调用会 401) DEEPSEEK_API_KEY=your-deepseek-api-key DEEPSEEK_BASE_URL=https://api.deepseek.com diff --git a/backend/scripts/create_zhini_kefu_10.py b/backend/scripts/create_zhini_kefu_10.py new file mode 100644 index 0000000..4ff338a --- /dev/null +++ b/backend/scripts/create_zhini_kefu_10.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +""" +从「知你客服9号」复制为「知你客服10号」,改进记忆与连接策略: + +1. 在 json-parse 之后插入 code-identity-merge:把用户指定的「助手称呼」写入 memory.context.assistant_display_name + (与 user_profile.name 区分)。 +2. code-build-context:向 LLM 注入 assistant_display_name。 +3. llm-unified 提示词:自我介绍优先用 assistant_display_name;user_profile.name 仅表示用户。 +4. condition-need-summary:history_count >= 2 即走摘要分支(原常为 >=4,summary 易长期为空)。 + +需 Celery 已加载含 re/hashlib 注入的 workflow_engine(代码节点内勿写 import re)。 +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +SOURCE_AGENT_ID = os.getenv("ZHINI_9_AGENT_ID", "de5932d6-3c05-4b27-ab08-f6cb403ce4b9") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服10号" +NEW_DESC = ( + "在知你客服9号基础上:① memory.context.assistant_display_name 存助手对外称呼,与 user_profile.name(用户)分离;" + "② 摘要分支 history_count>=2 更易生成 conversation_summary;" + "③ 工作流在 json-parse 后增加 code-identity-merge 再进入抽取/写记忆。" +) + +CODE_IDENTITY_MERGE = r"""mem = dict(input_data.get('memory') or {}) +ctx = dict(mem.get('context') or {}) +q = str(input_data.get('query') or input_data.get('user_input') or '').strip() +for pat in ( + r'你的\s*名字\s*叫\s*([^\s,。!?,.!?]{1,32})', + r'你\s*叫\s*(?!什么)([^\s,。!?,.!?]{1,32})', + r'(?:客服|助手)\s*叫\s*([^\s,。!?,.!?]{1,32})', +): + m = re.search(pat, q) + if not m: + continue + name = 
m.group(1).strip().strip(',。!?,.!?') + if not name: + continue + if any(b in name for b in ('什么', '哪位', '谁', '啥')): + continue + ctx['assistant_display_name'] = name + break +mem['context'] = ctx +out = dict(input_data) +out['memory'] = mem +result = out +""" + +CODE_BUILD_CONTEXT_V10 = r"""left = input_data.get('left') or {} +right = input_data.get('right') or [] +if not isinstance(right, list): + right = [] +mem = left.get('memory') or {} +hist = mem.get('conversation_history') or [] +if not isinstance(hist, list): + hist = [] +summary = mem.get('conversation_summary') or '' +ctx = mem.get('context') or {} +if not isinstance(ctx, dict): + ctx = {} +assistant_name = str(ctx.get('assistant_display_name') or '').strip() +recent_n = 16 +recent = hist[-recent_n:] if len(hist) > recent_n else hist +recent_str = '\n'.join(f"{x.get('role', '')}: {x.get('content', '')}" for x in recent) +vec_str = '\n'.join((rec.get('text') or rec.get('content') or '') for rec in right) +query = (left.get('user_input') or left.get('query') or '').strip() +older = hist[:-recent_n] if len(hist) > recent_n else [] + + +def _tok(s): + s = str(s) + ch = {c for c in s if '\u4e00' <= c <= '\u9fff'} + wd = set(s.lower().replace('\n', ' ').split()) + return ch | wd + + +qt = _tok(query) if query else set() +scored = [] +for m in older: + c = str(m.get('content', '')) + if not c: + continue + sc = len(qt & _tok(c)) if qt else 0 + if sc > 0: + scored.append((sc, str(m.get('role', '')), c[:240])) +scored.sort(key=lambda x: -x[0]) +kw_lines = [f"{role}: {text}" for _, role, text in scored[:6]] +kw_str = '\n'.join(kw_lines) +relevant_str = vec_str.strip() +if kw_str: + if relevant_str: + relevant_str = relevant_str + '\n---\n关键词相关历史:\n' + kw_str + else: + relevant_str = '关键词相关历史:\n' + kw_str +result = { + 'user_input': left.get('user_input') or left.get('query') or '', + 'memory': { + 'user_profile': mem.get('user_profile') or {}, + 'conversation_summary': summary, + 'relevant_from_retrieval': 
relevant_str, + 'recent_turns': recent_str, + 'assistant_display_name': assistant_name, + }, + 'query': left.get('query') or '', + 'user_id': left.get('user_id'), +} +""" + +LLM_PROMPT_V10 = """你是客服助手。根据用户输入、用户画像、助手称呼、远期摘要、检索片段与最近对话生成回复。 + +【称呼规则】 +- user_profile.name(及同类字段)仅表示「用户」的昵称/姓名。 +- memory.assistant_display_name 表示用户为你指定的「对外称呼」。若非空,用户问「你叫什么名字」「你是谁」时,须用该称呼自称(可带「客服助手」类前缀,但核心名须一致);禁止忽略已保存的 assistant_display_name 改回默认虚构名。 +- 若 assistant_display_name 为空,可自称「客服助手」等通用名。 + +【任务】 +1)判断意图;2)自然、有帮助的 reply(JSON 内一条字符串); +3)用户自我介绍姓名时写入 user_profile(如 name),勿把用户姓名写入 assistant_display_name; +4)用户问「我叫什么」时依据 user_profile 与历史/摘要回答。 + +只输出一行合法 JSON,不要 markdown。示例: +{"intent":"chat","reply":"你好!","user_profile":{"name":"小明"}} + +用户输入:{{user_input}} +用户画像:{{memory.user_profile}} +助手对外称呼(用户指定,可能为空):{{memory.assistant_display_name}} +远期摘要:{{memory.conversation_summary}} +相关历史(检索):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} + +要求:reply 200 字以内;user_profile 为对象。""" + + +def _insert_identity_node_and_edges(wf: dict) -> None: + nodes = wf.setdefault("nodes", []) + edges = wf.setdefault("edges", []) + if any(n.get("id") == "code-identity-merge" for n in nodes): + return + # 参考 json-parse 位置:在其右侧插入 + jx, jy = 2200, 400 + for n in nodes: + if n.get("id") == "json-parse": + pos = n.get("position") or {} + jx = pos.get("x", jx) + 80 + jy = pos.get("y", jy) + break + nodes.append( + { + "id": "code-identity-merge", + "type": "code", + "position": {"x": jx, "y": jy}, + "data": { + "label": "合并助手称呼到 context", + "language": "python", + "code": CODE_IDENTITY_MERGE, + }, + } + ) + new_edges = [] + removed = False + for e in edges: + if e.get("source") == "json-parse" and e.get("target") == "transform-extract-reply-and-profile": + removed = True + continue + new_edges.append(e) + if not removed: + print("警告: 未找到 json-parse -> transform-extract-reply-and-profile 的边,仍追加新边", file=sys.stderr) + new_edges.append( + { + "id": "e11a-identity", + "source": "json-parse", + "target": 
"code-identity-merge", + "sourceHandle": "right", + "targetHandle": "left", + } + ) + new_edges.append( + { + "id": "e11b-identity", + "source": "code-identity-merge", + "target": "transform-extract-reply-and-profile", + "sourceHandle": "right", + "targetHandle": "left", + } + ) + wf["edges"] = new_edges + + +def _patch_nodes(wf: dict) -> None: + nodes = wf.get("nodes") or [] + for n in nodes: + nid = n.get("id") + if nid == "llm-unified": + n.setdefault("data", {})["prompt"] = LLM_PROMPT_V10 + elif nid == "code-build-context": + n.setdefault("data", {})["code"] = CODE_BUILD_CONTEXT_V10 + elif nid == "condition-need-summary": + d = n.setdefault("data", {}) + c = d.get("condition", "") + if "history_count" in c and ">=" in c: + d["condition"] = "{history_count} >= 2" + else: + d["condition"] = "{history_count} >= 2" + elif nid == "code-identity-merge": + n.setdefault("data", {})["code"] = CODE_IDENTITY_MERGE + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + _insert_identity_node_and_edges(wf) + _patch_nodes(wf) + + 
up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": NEW_DESC, "workflow_config": wf}, + timeout=120, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已更新:identity 节点与边、摘要阈值>=2、上下文与 LLM 提示") + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_11.py b/backend/scripts/create_zhini_kefu_11.py new file mode 100644 index 0000000..034032b --- /dev/null +++ b/backend/scripts/create_zhini_kefu_11.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +""" +从「知你客服10号」复制为「知你客服11号」: +- 在 llm-unified 上开启工具调用,仅启用 http_request(拉取网页/API 再总结)。 +- 提示词要求:需要访问 URL 时必须先调工具;最终对用户仍只输出一行合法 JSON(供下游 json-parse)。 + +环境变量:PLATFORM_BASE_URL、ZHINI_10_AGENT_ID(默认 10 号 ID)、登录账号密码。 +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +SOURCE_AGENT_ID = os.getenv("ZHINI_10_AGENT_ID", "c853482b-d298-44e4-9862-c84318f71abb") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服11号" +NEW_DESC = ( + "在知你客服10号基础上:主 LLM 开启工具调用,仅注册 http_request," + "可对用户给出的 http(s) 链接先抓取再摘要;" + "提示词要求最终仍输出单行 JSON,兼容原记忆/解析链路。" +) + +LLM_PROMPT_V11 = """你是客服助手。根据用户输入、用户画像、助手称呼、远期摘要、检索片段与最近对话生成回复。 + +【工具 http_request(重要)】 +- 当用户给出明确的 http:// 或 https:// 链接,并希望你「看看网页讲了什么」「总结这个页面」「打开链接」等时,你必须先调用工具 http_request:参数 url 为完整链接,method 一般填 GET(调用时 method 为必填)。 +- 根据工具返回的 JSON 中的 body 字段作答:body 可能是 HTML 或 JSON 文本;请提炼要点写入最终 reply,不要谎称「无法直接访问」。 +- 若 body 过长,reply 中用简明中文摘要即可(200 字内为主)。 +- 非 URL 类普通问答不要无故调用 http_request。 + +【称呼规则】(与 10 号一致) +- user_profile.name 仅表示「用户」昵称/姓名。 +- memory.assistant_display_name 非空时,用户问「你叫什么名字」等须用该称呼自称;勿把用户姓名写入 assistant_display_name。 +- 用户自我介绍姓名时写入 user_profile,勿写入 
assistant_display_name。 + +【最终输出格式(强制)】 +- 无论你调用了几次工具,给用户的**最后一条消息内容**必须是**一行合法 JSON**,且**不要** markdown、**不要**代码围栏。 +- JSON 必须可被解析,且至少包含:intent(字符串)、reply(字符串)、user_profile(对象,无新信息可为 {})。 +- 示例:{"intent":"summarize_url","reply":"该网页主要介绍了……","user_profile":{}} + +上下文: +用户输入:{{user_input}} +用户画像:{{memory.user_profile}} +助手对外称呼:{{memory.assistant_display_name}} +远期摘要:{{memory.conversation_summary}} +相关历史(检索):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} +""" + + +def _patch_llm_unified(wf: dict) -> None: + for n in wf.get("nodes") or []: + if n.get("id") != "llm-unified": + continue + d = n.setdefault("data", {}) + d["prompt"] = LLM_PROMPT_V11 + d["enable_tools"] = True + d["tools"] = ["http_request"] + d["selected_tools"] = ["http_request"] + return + print("警告: 未找到节点 llm-unified", file=sys.stderr) + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + _patch_llm_unified(wf) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": NEW_DESC, "workflow_config": wf}, + 
timeout=120, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已开启 enable_tools + http_request,并更新提示词") + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_12.py b/backend/scripts/create_zhini_kefu_12.py new file mode 100644 index 0000000..6b98f05 --- /dev/null +++ b/backend/scripts/create_zhini_kefu_12.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +""" +从「知你客服11号」复制为「知你客服12号」: +- llm-unified 开启工具:http_request + file_read + file_write(受工作区根目录与大小限制约束,见 LOCAL_FILE_TOOLS_ROOT)。 +- 提示词:URL 用 http_request;读写本地文件用 file_read / file_write;最终仍输出单行 JSON。 + +环境变量:PLATFORM_BASE_URL、ZHINI_11_AGENT_ID(默认 11 号 ID)、登录账号密码。 +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +SOURCE_AGENT_ID = os.getenv("ZHINI_11_AGENT_ID", "d39748ad-277f-48ac-9eb5-168ad2f1b470") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服12号" +NEW_DESC = ( + "在知你客服11号基础上:增加本地文件读写工具 file_read、file_write(路径限制在平台配置的工作区内," + "默认可视为仓库根目录),并保留 http_request;" + "输出仍为单行 JSON,兼容记忆与 json-parse 链路。" +) + +TOOLS_V12 = ["http_request", "file_read", "file_write", "system_info"] + +LLM_PROMPT_V12 = """你是客服助手。根据用户输入、用户画像、助手称呼、远期摘要、检索片段与最近对话生成回复。 + +【工具 http_request】 +- 用户给出 http(s) 链接且需要抓网页/API 时,先调用 http_request:参数 url 为完整链接,method 必填(一般为 GET)。 +- 根据返回 JSON 中的 body 字段提炼要点;非 URL 问答不要无故调用。 + +【工具 system_info(工作区路径)】 +- 用户问「工作区路径」「能访问哪个目录」「file 根目录在哪」时,**必须调用 system_info**,用返回 JSON 里的 **local_file_workspace_root** 原样告知用户(不要用「临时目录」「无法显示」等推脱)。 + +【工具 file_read / file_write(本地文件)】 +- 仅当用户明确要「读文件」「写入某路径」「保存到本地文件」等时使用。 +- file_read:参数 file_path 可为**相对工作区根的相对路径**,或**落在工作区根之下的绝对路径**(Windows 如 `D:\\...`,Linux 如 
`/home/...`),二者等价,由后端校验。 +- file_write:参数 file_path、content;mode 用 w 覆盖或 a 追加。写入前确认路径有意、避免覆盖重要文件;不要写入密钥、令牌。 +- **禁止**以「不能访问 D: 盘」「只能相对路径」「工具看不到绝对路径」等理由拒绝用户:只要用户给的绝对路径以 `system_info` 返回的 `local_file_workspace_root` 为前缀(同一盘符、规范化后在其子路径下),就应**直接调用 file_write**,例如根为 `D:\\aaa\\aiagent` 时,`D:\\aaa\\aiagent\\user_data\\xxx.md` **合法**,可优先用用户原文路径或简写为相对路径 `user_data/xxx.md`。 +- 路径必须落在平台允许的工作区内,否则会报错;不要尝试访问工作区外的路径。 +- **禁止**假设工作区是 `/workspace` 或未经验证的目录;工作区根**只信** `local_file_workspace_root`。 +- **每次调用 file_write / file_read 后,必须在最终 reply 中说明工具返回结果**:成功则写明路径与要点;失败则引用返回 JSON 中的 error 字段,不得假装已成功。 +- **严禁编造工具返回**:reply 中若引用 file_write/file_read/system_info 的 JSON,必须与工具实际返回字符串一致(可原样粘贴)。禁止臆造路径(例如 /tmp/...、/workspace/...)或与当前系统不符的路径;若未调用工具,禁止在 reply 里写伪造的 JSON。 + +【称呼规则】(与 10/11 一致) +- user_profile.name 表示用户昵称;assistant_display_name 表示用户为你起的称呼。 +- 用户问「你叫什么」时用 assistant_display_name(若有);勿把用户姓名写入 assistant_display_name。 + +【最终输出格式(强制)】 +- 最后一条回复必须是**一行合法 JSON**,无 markdown、无代码围栏;含 intent、reply、user_profile(对象)。 + +上下文: +用户输入:{{user_input}} +用户画像:{{memory.user_profile}} +助手对外称呼:{{memory.assistant_display_name}} +远期摘要:{{memory.conversation_summary}} +相关历史(检索):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} +""" + + +def _patch_llm_unified(wf: dict) -> None: + for n in wf.get("nodes") or []: + if n.get("id") != "llm-unified": + continue + d = n.setdefault("data", {}) + d["prompt"] = LLM_PROMPT_V12 + d["enable_tools"] = True + d["tools"] = list(TOOLS_V12) + d["selected_tools"] = list(TOOLS_V12) + return + print("警告: 未找到节点 llm-unified", file=sys.stderr) + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = 
{"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + _patch_llm_unified(wf) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": NEW_DESC, "workflow_config": wf}, + timeout=120, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已注册工具:", ", ".join(TOOLS_V12)) + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_13.py b/backend/scripts/create_zhini_kefu_13.py new file mode 100644 index 0000000..1dce4ff --- /dev/null +++ b/backend/scripts/create_zhini_kefu_13.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +""" +从「知你客服12号」复制为「知你客服13号」: + +- **画布**:去除自环边、合并重复的 source→target 边;统一锚点 sourceHandle=right、targetHandle=left; + 按从「开始」出发的分层布局重排节点坐标,减少交叉与「绕圈」观感(不改变节点 id/业务配置)。 +- **提示词**:在 12 号能力(http_request、file_read、file_write、system_info)基础上,增加工具调用纪律 + (避免同轮重复 file_write、勿刷屏 DSML)。 + +若已存在同名 Agent「知你客服13号」,则仅更新其 workflow + 描述(不新建)。 + +用法: + cd backend && .\\venv\\Scripts\\python.exe scripts/create_zhini_kefu_13.py + +环境变量: PLATFORM_BASE_URL, PLATFORM_USERNAME, PLATFORM_PASSWORD, + SOURCE_AGENT_NAME(默认 知你客服12号), TARGET_NAME(默认 知你客服13号) +""" +from __future__ import annotations + +import copy +import json +import os +import sys +from collections import defaultdict +from 
typing import Any, Dict, List, Optional, Tuple + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") +SOURCE_NAME = os.getenv("SOURCE_AGENT_NAME", "知你客服12号") +TARGET_NAME = os.getenv("TARGET_NAME", "知你客服13号") + +TOOLS_V13 = ["http_request", "file_read", "file_write", "system_info"] + +# 在 12 号提示词基础上追加(create_zhini_kefu_12 正文过长时由脚本从源 Agent 读取再拼接) +PROMPT_V13_EXTRA = """ + +【画布/执行说明(13 号)】 +- 工作流连线已整理为从左到右主线,减少自环与重复边带来的误解;逻辑仍以引擎与节点配置为准。 + +【工具调用纪律(13 号)】 +- 同一轮用户请求中,对 **file_write** 无特殊说明时不要重复调用多次;每个明确文件需求通常 **一次写入** 即可。 +- 不要在回复正文中 **重复刷屏** DSML、`<|DSML|`、`invoke name=` 等标签行;工具返回后应用自然语言说明,并仍以 **单行 JSON** 收尾。 +- 若上一轮已写入成功,除非用户要求修改或另存,不要再次写入相同路径。 + +【单行 JSON 与用户画像(与 12 号一致,勿留空)】 +- 最后一行 JSON 的 user_profile 须与事实一致:用户已告知昵称时须包含 "name"(如「小七」);**禁止**用空的 user_profile 覆盖会话记忆。 +- 仅靠 file_write 写入本地文件**不能**替代上述 JSON 中的 user_profile;多轮称呼以 JSON + 会话记忆为准。 +""" + + +def _sanitize_edges(edges: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """去掉自环、按 (source,target) 去重,统一左右锚点。""" + seen: set = set() + out: List[Dict[str, Any]] = [] + for e in edges or []: + s, t = e.get("source"), e.get("target") + if not s or not t: + continue + if s == t: + continue + key = (s, t) + if key in seen: + continue + seen.add(key) + ne = dict(e) + ne["sourceHandle"] = "right" + ne["targetHandle"] = "left" + if not ne.get("id"): + ne["id"] = f"edge_{s}_{t}" + out.append(ne) + return out + + +def _find_start_node_ids(nodes: List[Dict[str, Any]]) -> List[str]: + ids: List[str] = [] + for n in nodes or []: + nid = n.get("id") or "" + nt = (n.get("type") or (n.get("data") or {}).get("type") or "").lower() + if nt == "start" or nid in ("start", "start-1") or str(nid).startswith("start-"): + ids.append(nid) + return ids + + +def _compute_ranks( + nodes: List[Dict[str, Any]], edges: List[Dict[str, Any]] +) -> Dict[str, int]: + node_ids = [n["id"] for n in nodes if 
n.get("id")] + start_ids = _find_start_node_ids(nodes) + incoming: Dict[str, int] = {nid: 0 for nid in node_ids} + for e in edges: + s, t = e.get("source"), e.get("target") + if not s or not t or s == t: + continue + if t in incoming: + incoming[t] += 1 + if not start_ids: + start_ids = [nid for nid in node_ids if incoming.get(nid, 0) == 0] or ([node_ids[0]] if node_ids else []) + + rank: Dict[str, int] = {s: 0 for s in start_ids} + nmax = max(len(nodes), 8) + for _ in range(nmax + 5): + updated = False + for e in edges: + s, t = e.get("source"), e.get("target") + if not s or not t or s == t: + continue + if s not in rank: + continue + nv = rank[s] + 1 + if t not in rank or rank[t] < nv: + rank[t] = nv + updated = True + if not updated: + break + max_r = max(rank.values(), default=0) + for nid in node_ids: + if nid not in rank: + rank[nid] = max_r + 1 + max_r += 1 + return rank + + +def _apply_layered_positions(nodes: List[Dict[str, Any]], ranks: Dict[str, int]) -> None: + layers: Dict[int, List[str]] = defaultdict(list) + for nid, r in ranks.items(): + layers[r].append(nid) + for r in layers: + layers[r].sort() + + x0, y0 = 80.0, 140.0 + x_step = 300.0 + y_step = 110.0 + + for r in sorted(layers.keys()): + ids = layers[r] + nlen = len(ids) + y_base = y0 - (nlen - 1) * y_step / 2.0 + for j, nid in enumerate(ids): + for node in nodes: + if node.get("id") != nid: + continue + pos = node.setdefault("position", {}) + pos["x"] = x0 + r * x_step + pos["y"] = y_base + j * y_step + break + + +def improve_workflow_layout_and_edges(wf: Dict[str, Any]) -> Tuple[int, int]: + """ + 返回 (去掉的自环条数, 去掉的重复边条数)。 + """ + nodes = wf.get("nodes") or [] + raw_edges = wf.get("edges") or [] + loops = sum( + 1 + for e in raw_edges + if e.get("source") and e.get("target") and e.get("source") == e.get("target") + ) + clean = _sanitize_edges(raw_edges) + removed_dup = len(raw_edges) - len(clean) - loops + + wf["edges"] = clean + + ranks = _compute_ranks(nodes, clean) + 
_apply_layered_positions(nodes, ranks) + return loops, max(0, removed_dup) + + +def _patch_llm_unified(wf: dict, base_prompt: Optional[str] = None) -> None: + for n in wf.get("nodes") or []: + if n.get("id") != "llm-unified": + continue + d = n.setdefault("data", {}) + prompt = base_prompt if base_prompt else d.get("prompt") or "" + if PROMPT_V13_EXTRA.strip() not in prompt: + prompt = (prompt.rstrip() + "\n" + PROMPT_V13_EXTRA).strip() + d["prompt"] = prompt + d["enable_tools"] = True + d["tools"] = list(TOOLS_V13) + d["selected_tools"] = list(TOOLS_V13) + return + print("警告: 未找到节点 llm-unified", file=sys.stderr) + + +def _find_agent_id_by_name(h: Dict[str, str], name: str) -> Optional[str]: + r = requests.get(f"{BASE}/api/v1/agents", params={"search": name, "limit": 50}, headers=h, timeout=30) + if r.status_code != 200: + return None + for a in r.json() or []: + if a.get("name") == name: + return a.get("id") + return None + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + src_id = _find_agent_id_by_name(h, SOURCE_NAME) + if not src_id: + print(f"未找到源 Agent: {SOURCE_NAME}", file=sys.stderr) + return 1 + + existing_13 = _find_agent_id_by_name(h, TARGET_NAME) + if existing_13: + print("已存在", TARGET_NAME, "-> 仅更新工作流", existing_13) + new_id = existing_13 + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + else: + dup = requests.post( + f"{BASE}/api/v1/agents/{src_id}/duplicate", + headers=h, + json={"name": 
TARGET_NAME}, + timeout=60, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + agent = dup.json() + print("已创建副本:", new_id, TARGET_NAME) + + wf = copy.deepcopy(agent["workflow_config"]) + loops, dup_edges = improve_workflow_layout_and_edges(wf) + print(f"连线整理: 去掉自环 {loops} 条, 合并重复边 {dup_edges} 条") + + g2 = requests.get(f"{BASE}/api/v1/agents/{src_id}", headers=h, timeout=30) + base_prompt = None + if g2.status_code == 200: + try: + for n in g2.json().get("workflow_config", {}).get("nodes") or []: + if n.get("id") == "llm-unified": + base_prompt = (n.get("data") or {}).get("prompt") + break + except Exception: + pass + _patch_llm_unified(wf, base_prompt=base_prompt) + + desc = ( + "在知你客服12号基础上:整理工作流连线(去自环/重复边、分层布局、统一左右锚点)," + "并强化工具调用纪律(避免同轮重复 file_write、勿刷屏 DSML);" + "工具仍为 http_request、file_read、file_write、system_info;输出单行 JSON。" + ) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": desc, "workflow_config": wf}, + timeout=120, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:1200], file=sys.stderr) + return 1 + print("已写入工具:", ", ".join(TOOLS_V13)) + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": TARGET_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_14.py b/backend/scripts/create_zhini_kefu_14.py new file mode 100644 index 0000000..9a4f408 --- /dev/null +++ b/backend/scripts/create_zhini_kefu_14.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python3 +""" +从「知你客服13号」复制为「知你客服14号」: + +- **画布**:与 13 号脚本相同(去自环/重复边、分层布局、统一左右锚点)。 +- **工具**:在 13 号(http_request、file_read、file_write、system_info)基础上,增加平台已注册的内置工具: + text_analyze、datetime、math_calculate、json_process、database_query、adb_log(与 `tools_bootstrap` 对齐)。 +- **提示词**:在 13 号提示词后追加 14 号扩展工具说明与纪律。 + +若已存在同名 Agent「知你客服14号」,则仅更新其 workflow + 描述(不新建)。 + +用法: + cd 
backend && .\\venv\\Scripts\\python.exe scripts/create_zhini_kefu_14.py + +环境变量: PLATFORM_BASE_URL, PLATFORM_USERNAME, PLATFORM_PASSWORD, + SOURCE_AGENT_NAME(默认 知你客服13号), TARGET_NAME(默认 知你客服14号) +""" +from __future__ import annotations + +import copy +import json +import os +import sys +from collections import defaultdict +from typing import Any, Dict, List, Optional, Tuple + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") +SOURCE_NAME = os.getenv("SOURCE_AGENT_NAME", "知你客服13号") +TARGET_NAME = os.getenv("TARGET_NAME", "知你客服14号") + +# 与 app.core.tools_bootstrap.ensure_builtin_tools_registered 中注册列表一致(全量内置工具) +TOOLS_V14: List[str] = [ + "http_request", + "file_read", + "file_write", + "text_analyze", + "datetime", + "math_calculate", + "system_info", + "json_process", + "database_query", + "adb_log", +] + +PROMPT_V14_MARKER = "【知你客服 14 号 · 扩展工具】" + +PROMPT_V14_EXTRA = f""" + +{PROMPT_V14_MARKER} +在 13 号既有能力与纪律之上,可使用下列额外工具(按需调用,避免无关刷屏;仍以 **单行 JSON** 收尾): + +【text_analyze】文本分析:`text` 为正文,`operation` 为 `count`(字数/行数等统计)、`keywords`(简单词频)、`summary`(取前几句摘要)。 + +【datetime】日期时间:`operation` 常用 `now`;`format` 为 strftime 格式串(可选)。 + +【math_calculate】数学计算:`expression` 为安全算术表达式(如 `2+2*3`、`sqrt(16)`),勿编造结果,以工具返回为准。 + +【json_process】JSON 处理:`json_string` + `operation` 为 `parse` | `stringify` | `validate`。 + +【database_query】只读 SQL:**仅允许 SELECT**。未指定数据源时使用平台默认库;若需指定外部数据源可传 `data_source_id`。不得编造查询结果;大表注意 `timeout`(秒)。 + +【adb_log】Android 日志:依赖运行环境已安装 **adb** 且设备可用;`command` 等参数按工具 schema。仅在用户明确需要拉取/分析设备日志时使用,避免滥用。 + +【纪律】 +- 继承 13 号:同轮避免无故重复 `file_write`;勿在正文中刷屏 DSML。 +- `database_query` 禁止非 SELECT;`adb_log` 需环境与权限,失败时如实说明工具返回。 +""" + + +def _sanitize_edges(edges: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """去掉自环、按 (source,target) 去重,统一左右锚点。""" + seen: set = set() + out: List[Dict[str, Any]] = [] + for e in edges or []: + s, t = 
e.get("source"), e.get("target") + if not s or not t: + continue + if s == t: + continue + key = (s, t) + if key in seen: + continue + seen.add(key) + ne = dict(e) + ne["sourceHandle"] = "right" + ne["targetHandle"] = "left" + if not ne.get("id"): + ne["id"] = f"edge_{s}_{t}" + out.append(ne) + return out + + +def _find_start_node_ids(nodes: List[Dict[str, Any]]) -> List[str]: + ids: List[str] = [] + for n in nodes or []: + nid = n.get("id") or "" + nt = (n.get("type") or (n.get("data") or {}).get("type") or "").lower() + if nt == "start" or nid in ("start", "start-1") or str(nid).startswith("start-"): + ids.append(nid) + return ids + + +def _compute_ranks( + nodes: List[Dict[str, Any]], edges: List[Dict[str, Any]] +) -> Dict[str, int]: + node_ids = [n["id"] for n in nodes if n.get("id")] + start_ids = _find_start_node_ids(nodes) + incoming: Dict[str, int] = {nid: 0 for nid in node_ids} + for e in edges: + s, t = e.get("source"), e.get("target") + if not s or not t or s == t: + continue + if t in incoming: + incoming[t] += 1 + if not start_ids: + start_ids = [nid for nid in node_ids if incoming.get(nid, 0) == 0] or ([node_ids[0]] if node_ids else []) + + rank: Dict[str, int] = {s: 0 for s in start_ids} + nmax = max(len(nodes), 8) + for _ in range(nmax + 5): + updated = False + for e in edges: + s, t = e.get("source"), e.get("target") + if not s or not t or s == t: + continue + if s not in rank: + continue + nv = rank[s] + 1 + if t not in rank or rank[t] < nv: + rank[t] = nv + updated = True + if not updated: + break + max_r = max(rank.values(), default=0) + for nid in node_ids: + if nid not in rank: + rank[nid] = max_r + 1 + max_r += 1 + return rank + + +def _apply_layered_positions(nodes: List[Dict[str, Any]], ranks: Dict[str, int]) -> None: + layers: Dict[int, List[str]] = defaultdict(list) + for nid, r in ranks.items(): + layers[r].append(nid) + for r in layers: + layers[r].sort() + + x0, y0 = 80.0, 140.0 + x_step = 300.0 + y_step = 110.0 + + for r in 
sorted(layers.keys()): + ids = layers[r] + nlen = len(ids) + y_base = y0 - (nlen - 1) * y_step / 2.0 + for j, nid in enumerate(ids): + for node in nodes: + if node.get("id") != nid: + continue + pos = node.setdefault("position", {}) + pos["x"] = x0 + r * x_step + pos["y"] = y_base + j * y_step + break + + +def improve_workflow_layout_and_edges(wf: Dict[str, Any]) -> Tuple[int, int]: + """返回 (去掉的自环条数, 去掉的重复边条数)。""" + nodes = wf.get("nodes") or [] + raw_edges = wf.get("edges") or [] + loops = sum( + 1 + for e in raw_edges + if e.get("source") and e.get("target") and e.get("source") == e.get("target") + ) + clean = _sanitize_edges(raw_edges) + removed_dup = len(raw_edges) - len(clean) - loops + + wf["edges"] = clean + + ranks = _compute_ranks(nodes, clean) + _apply_layered_positions(nodes, ranks) + return loops, max(0, removed_dup) + + +def _patch_llm_unified(wf: dict, base_prompt: Optional[str] = None) -> None: + for n in wf.get("nodes") or []: + if n.get("id") != "llm-unified": + continue + d = n.setdefault("data", {}) + prompt = base_prompt if base_prompt else d.get("prompt") or "" + if PROMPT_V14_MARKER not in prompt: + prompt = (prompt.rstrip() + "\n" + PROMPT_V14_EXTRA).strip() + d["prompt"] = prompt + d["enable_tools"] = True + d["tools"] = list(TOOLS_V14) + d["selected_tools"] = list(TOOLS_V14) + return + print("警告: 未找到节点 llm-unified", file=sys.stderr) + + +def _find_agent_id_by_name(h: Dict[str, str], name: str) -> Optional[str]: + r = requests.get(f"{BASE}/api/v1/agents", params={"search": name, "limit": 50}, headers=h, timeout=30) + if r.status_code != 200: + return None + for a in r.json() or []: + if a.get("name") == name: + return a.get("id") + return None + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 
+ token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + src_id = _find_agent_id_by_name(h, SOURCE_NAME) + if not src_id: + print(f"未找到源 Agent: {SOURCE_NAME}", file=sys.stderr) + return 1 + + existing = _find_agent_id_by_name(h, TARGET_NAME) + if existing: + print("已存在", TARGET_NAME, "-> 仅更新工作流", existing) + new_id = existing + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + else: + dup = requests.post( + f"{BASE}/api/v1/agents/{src_id}/duplicate", + headers=h, + json={"name": TARGET_NAME}, + timeout=60, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + agent = dup.json() + print("已创建副本:", new_id, TARGET_NAME) + + wf = copy.deepcopy(agent["workflow_config"]) + loops, dup_edges = improve_workflow_layout_and_edges(wf) + print(f"连线整理: 去掉自环 {loops} 条, 合并重复边 {dup_edges} 条") + + g2 = requests.get(f"{BASE}/api/v1/agents/{src_id}", headers=h, timeout=30) + base_prompt = None + if g2.status_code == 200: + try: + for n in g2.json().get("workflow_config", {}).get("nodes") or []: + if n.get("id") == "llm-unified": + base_prompt = (n.get("data") or {}).get("prompt") + break + except Exception: + pass + _patch_llm_unified(wf, base_prompt=base_prompt) + + desc = ( + "在知你客服13号基础上:扩展内置工具为全量(含 text_analyze、datetime、math_calculate、" + "json_process、database_query、adb_log 等);画布与 13 号一致整理;输出仍为单行 JSON。" + ) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": desc, "workflow_config": wf}, + timeout=120, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:1200], file=sys.stderr) + return 1 + print("已写入工具:", ", ".join(TOOLS_V14)) + print("Agent ID:", new_id) + 
print(json.dumps({"id": new_id, "name": TARGET_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_7.py b/backend/scripts/create_zhini_kefu_7.py new file mode 100644 index 0000000..9d67ab8 --- /dev/null +++ b/backend/scripts/create_zhini_kefu_7.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +""" +从「知你客服6号」复制为「知你客服7号」,并更新 LLM 提示(强化姓名与 user_profile 记忆说明)。 +需本地平台已启动(默认 http://127.0.0.1:8037),账号 admin/123456。 + +用法: + cd backend && ..\\venv\\Scripts\\python.exe scripts/create_zhini_kefu_7.py + 或: python scripts/create_zhini_kefu_7.py +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +SOURCE_AGENT_ID = os.getenv("ZHINI_6_AGENT_ID", "2acc84d5-814b-4d61-9703-94a4b117375f") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服7号" +NEW_DESC = ( + "在知你客服6号工作流基础上,配合引擎修复多轮记忆:" + "对话历史写入真实助手回复、合并 user_profile(含姓名);" + "LLM 提示词强调用户姓名与 user_profile 的维护。" +) + +LLM_PROMPT = """你是客服助手。根据「用户当前输入」「已知用户信息」「相关历史(检索)」和「最近几轮」完成: +1)判断意图; +2)生成一句自然、有帮助的回复; +3)【强制】只要用户说出或暗示自己的姓名、昵称,必须在 user_profile 里用字段 name 保存,例如用户说「我叫王小明」则 JSON 必须包含 "user_profile":{"name":"王小明"}(若已有其它字段则合并,不要丢字段); +4)若用户问「我叫什么」「你还记得我名字吗」等,必须根据「已知用户信息」里的 user_profile.name 与对话历史回答;若已有 name 则禁止说「还不知道」。 + +只输出一行合法 JSON,不要 markdown。格式示例: +{"intent":"greeting","reply":"你好王小明!","user_profile":{"name":"王小明"}} + +用户输入:{{user_input}} +已知用户信息:{{memory.user_profile}} +相关历史(检索到的):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} + +要求:reply 简洁自然,200 字以内;user_profile 为对象,至少包含 name(当用户自我介绍时)。""" + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", 
r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + nodes = wf.get("nodes") or [] + for n in nodes: + if n.get("id") == "llm-unified": + n.setdefault("data", {})["prompt"] = LLM_PROMPT + break + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={ + "description": NEW_DESC, + "workflow_config": wf, + }, + timeout=60, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已更新描述与 llm-unified 提示词") + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_8.py b/backend/scripts/create_zhini_kefu_8.py new file mode 100644 index 0000000..934ff91 --- /dev/null +++ b/backend/scripts/create_zhini_kefu_8.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +""" +从「知你客服7号」复制为「知你客服8号」:在 7 号多轮记忆能力基础上,说明使用平台「永久记忆」 +(Cache user_memory_* 同步写入 MySQL persistent_user_memories,需 MEMORY_PERSIST_DB_ENABLED=true)。 + +需本地平台已启动(默认 http://127.0.0.1:8037),账号可通过环境变量配置。 + +用法: + cd backend && .\\venv\\Scripts\\python.exe scripts/create_zhini_kefu_8.py +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + 
+BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +# 默认从 7 号复制;也可通过环境变量指定 +SOURCE_AGENT_ID = os.getenv("ZHINI_7_AGENT_ID", "688c2c41-dcd1-4285-b193-6bed00c485c2") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服8号" +NEW_DESC = ( + "在知你客服7号基础上面向「永久记忆」:工作流仍为 user_memory_{user_id} 读写;" + "引擎将记忆同步至 MySQL(跨 Redis TTL、服务重启仍保留)。" + "调用时请固定传入 user_id;部署需开启 MEMORY_PERSIST_DB_ENABLED。" +) + +LLM_PROMPT = """你是客服助手。根据「用户当前输入」「已知用户信息」「相关历史(检索)」和「最近几轮」完成: +1)判断意图; +2)生成一句自然、有帮助的回复; +3)【强制】只要用户说出或暗示自己的姓名、昵称,必须在 user_profile 里用字段 name 保存,例如用户说「我叫王小明」则 JSON 必须包含 "user_profile":{"name":"王小明"}(若已有其它字段则合并,不要丢字段); +4)若用户问「我叫什么」「你还记得我名字吗」等,必须根据「已知用户信息」里的 user_profile.name 与对话历史回答;若已有 name 则禁止说「还不知道」。 +5)系统会在后台持久化用户画像与近期对话;请始终基于「已知用户信息」与「最近几轮」作答,避免与用户已提供信息矛盾。 + +只输出一行合法 JSON,不要 markdown。格式示例: +{"intent":"greeting","reply":"你好王小明!","user_profile":{"name":"王小明"}} + +用户输入:{{user_input}} +已知用户信息:{{memory.user_profile}} +相关历史(检索到的):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} + +要求:reply 简洁自然,200 字以内;user_profile 为对象,至少包含 name(当用户自我介绍时)。""" + + +def _patch_cache_nodes_for_memory(wf: dict) -> None: + """为 Cache 节点设置更长对话窗口与较长 Redis TTL;真正永久存储由引擎写 MySQL。""" + nodes = wf.get("nodes") or [] + for n in nodes: + if n.get("type") != "cache": + continue + data = n.setdefault("data", {}) + op = data.get("operation", "get") + if op == "set": + data["max_history_length"] = 40 + data["ttl"] = 604800 # 7 天热缓存;冷数据仍可从 DB 拉回 + elif op == "get": + data["ttl"] = 604800 + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": 
f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + nodes = wf.get("nodes") or [] + for n in nodes: + if n.get("id") == "llm-unified": + n.setdefault("data", {})["prompt"] = LLM_PROMPT + break + _patch_cache_nodes_for_memory(wf) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={ + "description": NEW_DESC, + "workflow_config": wf, + }, + timeout=60, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已更新描述、llm-unified 提示词,并为 Cache 节点设置 max_history_length/ttl(可选)") + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/create_zhini_kefu_9.py b/backend/scripts/create_zhini_kefu_9.py new file mode 100644 index 0000000..94b8467 --- /dev/null +++ b/backend/scripts/create_zhini_kefu_9.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +""" +从「知你客服8号」复制为「知你客服9号」,强化「摘要 + 检索」可用性: + +1. 上下文 code-build-context:更长近期轮次、注入 conversation_summary、向量结果 + 关键词从历史中捞相关句。 +2. 摘要路径 code-build-memory-value:在原有摘要分支上合并进完整 conversation_history(追加而非仅 2 条),并写回 conversation_summary。 +3. cache-update-summary:显式 value 为「memory」表达式,避免整包 input_data 写入 Redis。 +4. 
向量写入:为每条 turn 带 metadata.user_id;引擎侧检索已按 user_id 过滤。 + +需平台可登录;默认源 8 号 ID 为上次创建结果,可用 ZHINI_8_AGENT_ID 覆盖。 +部署后请重启 Celery/API 以加载引擎向量过滤逻辑。 +""" +from __future__ import annotations + +import json +import os +import sys + +import requests + +BASE = os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037").rstrip("/") +SOURCE_AGENT_ID = os.getenv("ZHINI_8_AGENT_ID", "d7b64bf6-c8e3-4dc7-befc-03a98d5ff741") +USER = os.getenv("PLATFORM_USERNAME", "admin") +PWD = os.getenv("PLATFORM_PASSWORD", "123456") + +NEW_NAME = "知你客服9号" +NEW_DESC = ( + "在知你客服8号基础上强化摘要与检索:" + "远期要点写入 conversation_summary;" + "当轮上下文含「近期对话 + 摘要 + 向量片段 + 关键词相关历史」;" + "向量库写入带 user_id 元数据,引擎检索按用户隔离。" + "仍依赖 MEMORY_PERSIST_DB_ENABLED 与固定 user_id。" +) + +LLM_PROMPT = """你是客服助手。根据「用户当前输入」「已知用户信息」「远期摘要」「相关历史(检索)」和「最近几轮」完成: +1)判断意图; +2)生成一句自然、有帮助的回复; +3)【强制】用户说出或暗示姓名、昵称时,必须在 user_profile.name 保存;合并已有字段勿丢失; +4)用户问「我叫什么」等时,必须依据 user_profile.name 与对话/摘要回答;已有 name 时禁止说「还不知道」; +5)「远期摘要」概括更早话题;「相关历史」可能含向量命中或关键词命中的旧轮次,请结合使用。 + +只输出一行合法 JSON,不要 markdown。格式示例: +{"intent":"greeting","reply":"你好!","user_profile":{"name":"小明"}} + +用户输入:{{user_input}} +已知用户信息:{{memory.user_profile}} +远期摘要:{{memory.conversation_summary}} +相关历史(检索到的):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} + +要求:reply 简洁自然,200 字以内;user_profile 为对象。""" + +CODE_BUILD_CONTEXT = r"""left = input_data.get('left') or {} +right = input_data.get('right') or [] +if not isinstance(right, list): + right = [] +mem = left.get('memory') or {} +hist = mem.get('conversation_history') or [] +if not isinstance(hist, list): + hist = [] +summary = mem.get('conversation_summary') or '' +recent_n = 16 +recent = hist[-recent_n:] if len(hist) > recent_n else hist +recent_str = '\n'.join(f"{x.get('role', '')}: {x.get('content', '')}" for x in recent) +vec_str = '\n'.join((rec.get('text') or rec.get('content') or '') for rec in right) +query = (left.get('user_input') or left.get('query') or '').strip() +older = hist[:-recent_n] if len(hist) > recent_n else [] + 
+ +def _tok(s): + s = str(s) + ch = {c for c in s if '\u4e00' <= c <= '\u9fff'} + wd = set(s.lower().replace('\n', ' ').split()) + return ch | wd + + +qt = _tok(query) if query else set() +scored = [] +for m in older: + c = str(m.get('content', '')) + if not c: + continue + sc = len(qt & _tok(c)) if qt else 0 + if sc > 0: + scored.append((sc, str(m.get('role', '')), c[:240])) +scored.sort(key=lambda x: -x[0]) +kw_lines = [f"{role}: {text}" for _, role, text in scored[:6]] +kw_str = '\n'.join(kw_lines) +relevant_str = vec_str.strip() +if kw_str: + if relevant_str: + relevant_str = relevant_str + '\n---\n关键词相关历史:\n' + kw_str + else: + relevant_str = '关键词相关历史:\n' + kw_str +result = { + 'user_input': left.get('user_input') or left.get('query') or '', + 'memory': { + 'user_profile': mem.get('user_profile') or {}, + 'conversation_summary': summary, + 'relevant_from_retrieval': relevant_str, + 'recent_turns': recent_str, + }, + 'query': left.get('query') or '', + 'user_id': left.get('user_id'), +} +""" + +CODE_BUILD_MEMORY_VALUE = r"""left = input_data.get('left') or {} +right_out = input_data.get('right') or {} +summary = '' +if isinstance(right_out, dict): + summary = right_out.get('output') or right_out.get('result') or '' +if not isinstance(summary, str): + summary = str(summary or '') +summary = summary.strip() +mem = left.get('memory') or {} +user_input = left.get('user_input') or left.get('query') or '' +reply = left.get('right') or '' +if isinstance(reply, dict): + reply = reply.get('right') or reply.get('content') or str(reply) +profile_update = left.get('user_profile_update') or {} +if not isinstance(profile_update, dict): + profile_update = {} +user_profile = dict(mem.get('user_profile') or {}, **profile_update) +ts = datetime.now().isoformat() +old_hist = mem.get('conversation_history') or [] +if not isinstance(old_hist, list): + old_hist = [] +new_hist = old_hist + [ + {'role': 'user', 'content': user_input, 'timestamp': ts}, + {'role': 'assistant', 
'content': str(reply or ''), 'timestamp': ts}, +] +max_len = 40 +if len(new_hist) > max_len: + new_hist = new_hist[-max_len:] +prev_sum = (mem.get('conversation_summary') or '').strip() +conversation_summary = summary if summary else prev_sum +memory_value = { + 'conversation_summary': conversation_summary, + 'conversation_history': new_hist, + 'user_profile': user_profile, + 'context': mem.get('context') or {}, +} +result = { + 'memory': memory_value, + 'user_id': left.get('user_id'), + 'query': left.get('query'), + 'user_input': user_input, + 'right': reply, + 'user_profile_update': profile_update, +} +""" + +CODE_BUILD_TURN_FOR_VECTOR = r"""reply = input_data.get('right') or '' +if isinstance(reply, dict): + reply = reply.get('right') or reply.get('content') or str(reply) +query = input_data.get('query') or '' +user_id = str(input_data.get('user_id') or 'default') +raw = (user_id + '\n' + str(query) + '\n' + str(reply)).encode('utf-8', errors='ignore') +doc_id = 'turn_' + hashlib.sha256(raw).hexdigest()[:24] +text = '用户:' + str(query) + '\n助手:' + str(reply) +result = { + 'text': text, + 'user_id': user_id, + 'id': doc_id, + 'metadata': {'user_id': user_id}, +} +""" + + +def _patch_nodes(wf: dict) -> None: + nodes = wf.get("nodes") or [] + for n in nodes: + nid = n.get("id") + if nid == "llm-unified": + n.setdefault("data", {})["prompt"] = LLM_PROMPT + elif nid == "code-build-context": + n.setdefault("data", {})["code"] = CODE_BUILD_CONTEXT + elif nid == "code-build-memory-value": + n.setdefault("data", {})["code"] = CODE_BUILD_MEMORY_VALUE + elif nid == "code-build-turn-for-vector": + n.setdefault("data", {})["code"] = CODE_BUILD_TURN_FOR_VECTOR + elif nid == "cache-update-summary": + d = n.setdefault("data", {}) + d["value"] = "memory" + elif nid == "transform-for-vector-upsert": + m = n.setdefault("data", {}).setdefault("mapping", {}) + m["metadata"] = "{{left.metadata}}" + + +def main() -> int: + r = requests.post( + f"{BASE}/api/v1/auth/login", + 
data={"username": USER, "password": PWD}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("无 access_token", file=sys.stderr) + return 1 + h = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + + dup = requests.post( + f"{BASE}/api/v1/agents/{SOURCE_AGENT_ID}/duplicate", + headers=h, + json={"name": NEW_NAME}, + timeout=30, + ) + if dup.status_code != 201: + print("复制失败:", dup.status_code, dup.text[:800], file=sys.stderr) + return 1 + new_id = dup.json()["id"] + print("已创建副本:", new_id, NEW_NAME) + + g = requests.get(f"{BASE}/api/v1/agents/{new_id}", headers=h, timeout=30) + if g.status_code != 200: + print("读取 Agent 失败:", g.text, file=sys.stderr) + return 1 + agent = g.json() + wf = agent["workflow_config"] + _patch_nodes(wf) + + up = requests.put( + f"{BASE}/api/v1/agents/{new_id}", + headers=h, + json={"description": NEW_DESC, "workflow_config": wf}, + timeout=60, + ) + if up.status_code != 200: + print("更新失败:", up.status_code, up.text[:800], file=sys.stderr) + return 1 + print("已更新:LLM 提示、code-build-context / memory-value / vector-turn、cache-update-summary.value、upsert.metadata") + print("Agent ID:", new_id) + print(json.dumps({"id": new_id, "name": NEW_NAME}, ensure_ascii=False)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/debug_cache_query.py b/backend/scripts/debug_cache_query.py new file mode 100644 index 0000000..9d9946a --- /dev/null +++ b/backend/scripts/debug_cache_query.py @@ -0,0 +1,37 @@ +"""单次执行后打印 cache-query 与 llm-unified 输出,用于排查记忆。""" +import json +import requests + +B = "http://127.0.0.1:8037" +AID = "688c2c41-dcd1-4285-b193-6bed00c485c2" +UID = "debug_uid_fresh_99" +MSG = "我叫李小红" + +r = requests.post( + B + "/api/v1/auth/login", + data={"username": "admin", "password": 
"123456"}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, +) +h = {"Authorization": f"Bearer {r.json()['access_token']}", "Content-Type": "application/json"} +eid = requests.post( + B + "/api/v1/executions", + headers=h, + json={"agent_id": AID, "input_data": {"query": MSG, "USER_INPUT": MSG, "user_id": UID}}, + timeout=30, +).json()["id"] + +import time + +for _ in range(90): + d = requests.get(f"{B}/api/v1/executions/{eid}", headers=h, timeout=60).json() + if d["status"] not in ("pending", "running"): + break + time.sleep(0.6) + +od = d.get("output_data") or {} +nr = od.get("node_results") or {} +cq = nr.get("cache-query") or {} +llm = nr.get("llm-unified") or {} +print("cache-query memory user_profile:", (cq.get("memory") or {}).get("user_profile")) +print("llm output:", (llm.get("output") or "")[:600]) diff --git a/backend/scripts/e2e_zhini11_test.py b/backend/scripts/e2e_zhini11_test.py new file mode 100644 index 0000000..cec6314 --- /dev/null +++ b/backend/scripts/e2e_zhini11_test.py @@ -0,0 +1,153 @@ +""" +知你客服11号 E2E:普通对话 + 要求拉取 URL(触发 http_request)。 +需 API、Celery、LLM、外网可达测试 URL。 + +默认会先重启本机 Celery Worker(与 e2e_zhini7 一致),以加载含 code 节点 re/hashlib 注入的引擎。 +跳过重启: 设置环境变量 E2E_RESTART_CELERY=0 + +用法: cd backend && .\\venv\\Scripts\\python.exe scripts/e2e_zhini11_test.py +""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +VENV_PY = BACKEND_DIR / "venv" / "Scripts" / "python.exe" +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服11号") +# 小 JSON,适合测 GET +TEST_URL = os.environ.get( + "E2E_TEST_URL", + "https://jsonplaceholder.typicode.com/posts/1", +) + + +def _restart_celery() -> None: + ps = ( + "Get-CimInstance Win32_Process | " + "Where-Object { $_.CommandLine -match 'celery_app' } | " + "ForEach-Object { Stop-Process 
-Id $_.ProcessId -Force -ErrorAction SilentlyContinue }" + ) + subprocess.run( + ["powershell", "-NoProfile", "-Command", ps], + cwd=str(BACKEND_DIR), + capture_output=True, + text=True, + ) + time.sleep(2) + if not VENV_PY.is_file(): + print("未找到 venv Python,跳过启动 Celery", file=sys.stderr) + return + popen_kw: dict = { + "cwd": str(BACKEND_DIR), + "stdout": subprocess.DEVNULL, + "stderr": subprocess.STDOUT, + } + if sys.platform == "win32": + popen_kw["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined] + subprocess.Popen( + [ + str(VENV_PY), + "-m", + "celery", + "-A", + "app.core.celery_app", + "worker", + "--loglevel=info", + "--pool=threads", + "--concurrency=8", + ], + **popen_kw, + ) + print("已启动新 Celery Worker,等待就绪…") + time.sleep(4) + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + + if os.environ.get("E2E_RESTART_CELERY", "1").strip().lower() not in ("0", "false", "no"): + _restart_celery() + + import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == AGENT_NAME).first() + if not agent: + print(f"未找到「{AGENT_NAME}」", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无用户", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"e2e_z11_{uuid.uuid4().hex[:10]}" + print(f"agent={agent.id} user_id={uid} test_url={TEST_URL}\n") + + def poll(client: httpx.Client, eid: str, timeout: float = 420.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{eid}", headers=headers) + r.raise_for_status() + d = r.json() + st = d.get("status") 
+ if st == "completed": + return d + if st == "failed": + print("failed:", d.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1.5) + raise TimeoutError("超时") + + def reply_text(out: dict) -> str: + od = out.get("output_data") or {} + if isinstance(od, dict): + r = od.get("result") + if isinstance(r, str): + return r[:800] + return json.dumps(od, ensure_ascii=False)[:800] + + rounds = [ + "我的名字叫测试员", + f"请用工具访问这个网址并简要说明返回里 title 或主要内容是什么(只回答要点):{TEST_URL}", + "我叫什么名字?", + ] + + with httpx.Client(base_url=API_BASE, timeout=420.0) as client: + for i, q in enumerate(rounds, 1): + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + print(f"--- 第{i}轮 execution={eid} ---") + out = poll(client, eid) + print(f"Q: {q[:120]}...") + print(f"A: {reply_text(out)}\n") + + print("完成") + finally: + db.close() + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/e2e_zhini12_123_md.py b/backend/scripts/e2e_zhini12_123_md.py new file mode 100644 index 0000000..32f5ead --- /dev/null +++ b/backend/scripts/e2e_zhini12_123_md.py @@ -0,0 +1,99 @@ +"""一次调用「知你客服12号」创建仓库根下 123.md(相对路径 123.md)。默认不重启 Celery。""" +from __future__ import annotations + +import json +import os +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +REPO_ROOT = BACKEND_DIR.parent +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服12号") +REL_PATH = "123.md" +FILE_CONTENT = "# 123\ne2e zhini12 123.md marker\n" + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + + import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from 
app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == AGENT_NAME).first() + if not agent: + print(f"未找到「{AGENT_NAME}」", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无用户", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"e2e123_{uuid.uuid4().hex[:10]}" + q = ( + f"创建 123.md。请用 file_write:相对路径 {REL_PATH}(工作区根下)," + f"content 为 {json.dumps(FILE_CONTENT, ensure_ascii=False)},mode 为 w。" + "reply 中写出 file_write 返回的真实 JSON。最后一行单行 JSON:intent、reply、user_profile。" + ) + print(f"agent={agent.id} ({AGENT_NAME}) user_id={uid}") + print(f"目标文件: {(REPO_ROOT / REL_PATH).resolve()}") + + def poll(client: httpx.Client, eid: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{eid}", headers=headers) + r.raise_for_status() + d = r.json() + st = d.get("status") + if st == "completed": + return d + if st == "failed": + print("failed:", d.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1.5) + raise TimeoutError("超时") + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + print(f"execution={eid}") + out = poll(client, eid) + od = out.get("output_data") or {} + result = od.get("result", od) + print("\n--- API result (截断 2000 字符) ---\n") + print(str(result)[:2000]) + + abs_file = (REPO_ROOT / REL_PATH).resolve() + if not abs_file.is_file(): + print(f"\n[FAIL] 磁盘未找到: {abs_file}", file=sys.stderr) + 
return 2 + body = abs_file.read_text(encoding="utf-8", errors="replace") + print(f"\n[OK] 文件: {abs_file}") + print("--- 内容 ---\n", body[:800]) + if "e2e zhini12 123.md marker" not in body: + print("\n[WARN] 未找到预期标记字符串", file=sys.stderr) + print("\n完成") + return 0 + finally: + db.close() + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/e2e_zhini12_bbb_md.py b/backend/scripts/e2e_zhini12_bbb_md.py new file mode 100644 index 0000000..87ab643 --- /dev/null +++ b/backend/scripts/e2e_zhini12_bbb_md.py @@ -0,0 +1,184 @@ +""" +通过「知你客服12号」Agent 实测:在 user_data 下创建 bbb.md(对应 D:\\aaa\\aiagent\\user_data\\bbb.md)。 + +依赖:API(默认 8037)、Redis、Celery、LLM;引擎需已修复 End 节点优先于 vector-upsert(否则界面 result 可能仍不对,但 file_write 可能已执行)。 + +用法: + cd backend + .\\venv\\Scripts\\python.exe scripts\\e2e_zhini12_bbb_md.py + +环境变量: + API_BASE 默认 http://127.0.0.1:8037 + E2E_AGENT_NAME 默认 知你客服12号 + E2E_REL_FILE 默认 user_data/bbb.md + E2E_FILE_CONTENT 写入内容,默认带标记行便于校验 + E2E_RESTART_CELERY 默认 1;设为 0 跳过重启 Worker + E2E_USE_ABSPATH_MSG 设为 1 时在用户话术中额外要求使用绝对路径 D:\\aaa\\aiagent\\user_data\\bbb.md +""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +VENV_PY = BACKEND_DIR / "venv" / "Scripts" / "python.exe" +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服12号") +REL_PATH = os.environ.get("E2E_REL_FILE", "user_data/bbb.md") +FILE_CONTENT = os.environ.get( + "E2E_FILE_CONTENT", + "# bbb\n\nzhini12 e2e bbb marker\n", +) + + +def _restart_celery() -> None: + ps = ( + "Get-CimInstance Win32_Process | " + "Where-Object { $_.CommandLine -match 'celery_app' } | " + "ForEach-Object { Stop-Process -Id $_.ProcessId -Force -ErrorAction SilentlyContinue }" + ) + subprocess.run( + ["powershell", "-NoProfile", "-Command", ps], + cwd=str(BACKEND_DIR), + capture_output=True, + 
text=True, + ) + time.sleep(2) + if not VENV_PY.is_file(): + print("未找到 venv Python,跳过启动 Celery", file=sys.stderr) + return + kw: dict = {"cwd": str(BACKEND_DIR), "stdout": subprocess.DEVNULL, "stderr": subprocess.STDOUT} + if sys.platform == "win32": + kw["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined] + popen_env = os.environ.copy() + if os.environ.get("E2E_LLM_TOOL_CHOICE", "required").strip().lower() not in ( + "0", + "false", + "no", + "auto", + ): + popen_env["LLM_TOOL_CHOICE"] = "required" + subprocess.Popen( + [ + str(VENV_PY), + "-m", + "celery", + "-A", + "app.core.celery_app", + "worker", + "--loglevel=info", + "--pool=threads", + "--concurrency=8", + ], + env=popen_env, + **kw, + ) + print("已启动 Celery,等待就绪…") + time.sleep(4) + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + # 实测 file_write:部分模型在 tool_choice=auto 下不发起 tool_calls,只输出伪造的 JSON 文本;E2E 默认要求至少一次函数调用 + if os.environ.get("E2E_LLM_TOOL_CHOICE", "required").strip().lower() not in ("0", "false", "no", "auto"): + os.environ["LLM_TOOL_CHOICE"] = "required" + if os.environ.get("E2E_RESTART_CELERY", "1").strip().lower() not in ("0", "false", "no"): + _restart_celery() + + import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == AGENT_NAME).first() + if not agent: + print(f"未找到「{AGENT_NAME}」", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无用户", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"bbb12_{uuid.uuid4().hex[:10]}" + + abs_win = str((BACKEND_DIR.parent / REL_PATH.replace("/", os.sep)).resolve()) 
+ use_abs = os.environ.get("E2E_USE_ABSPATH_MSG", "").strip().lower() in ("1", "true", "yes") + + path_hint = ( + f"绝对路径 {json.dumps(abs_win, ensure_ascii=False)}" + if use_abs + else f"相对路径 {REL_PATH}(相对工作区根)" + ) + _basename = os.path.basename(REL_PATH.replace("\\", "/")) + q = ( + f"请在 D:\\\\aaa\\\\aiagent\\\\user_data 目录下创建 {_basename}。" + f"必须通过工具 file_write 写入:优先使用 {path_hint};" + f"content 为 {json.dumps(FILE_CONTENT, ensure_ascii=False)},mode 为 w。" + f"file_write 返回的 JSON 必须原样体现在你最终 reply 的可读说明里(含 success 与 file_path)。" + f"最后一行仍输出单行 JSON:intent、reply、user_profile。" + ) + print(f"agent={agent.id} ({AGENT_NAME}) user_id={uid}\n目标文件(解析后): {abs_win}\n") + + def poll(client: httpx.Client, eid: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{eid}", headers=headers) + r.raise_for_status() + d = r.json() + st = d.get("status") + if st == "completed": + return d + if st == "failed": + print("failed:", d.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1.5) + raise TimeoutError("超时") + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + print(f"execution={eid}") + out = poll(client, eid) + od = out.get("output_data") or {} + result = od.get("result", od) + print("\n--- API output_data.result (截断 1500 字符) ---\n") + print(str(result)[:1500]) + + abs_file = Path(abs_win).resolve() + if not abs_file.is_file(): + print(f"\n[FAIL] 磁盘未找到: {abs_file}", file=sys.stderr) + print("若 API 已 completed:可能是模型未触发 file_write,或路径/权限问题;可看 Celery 日志「执行工具 file_write」。", file=sys.stderr) + return 2 + + body = abs_file.read_text(encoding="utf-8", errors="replace") + print(f"\n[OK] 文件存在: {abs_file}\n--- 内容 
---\n{body}\n---") + marker = os.environ.get("E2E_MARKER", "zhini12 e2e bbb marker") + if marker and marker not in body and FILE_CONTENT.strip() and marker in FILE_CONTENT: + print(f"[WARN] 未在文件中发现预期标记「{marker}」,可能内容与 E2E_FILE_CONTENT 不一致", file=sys.stderr) + finally: + db.close() + + print("\n完成") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/e2e_zhini12_bbbb_md.py b/backend/scripts/e2e_zhini12_bbbb_md.py new file mode 100644 index 0000000..02952be --- /dev/null +++ b/backend/scripts/e2e_zhini12_bbbb_md.py @@ -0,0 +1,239 @@ +""" +E2E:知你客服12号 — 用户话术「在 D:\\aaa\\aiagent\\user_data 下创建 bbbb.md」 +并校验:/health 内置工具、落盘路径必须在工作区内(user_data 子路径)、可选 node_results 中的 file_write 痕迹。 + +用法(在 backend 目录): + .\\venv\\Scripts\\python.exe scripts\\e2e_zhini12_bbbb_md.py + +环境变量:API_BASE、E2E_AGENT_NAME、E2E_RESTART_CELERY、E2E_LLM_TOOL_CHOICE。 +注意:固定 user_data/bbbb.md 与正文/标记常量;不读取 E2E_REL_FILE、E2E_FILE_CONTENT、E2E_MARKER(避免 Shell 残留误测)。 +默认 E2E_LLM_TOOL_CHOICE=auto。 +""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +REPO_ROOT = BACKEND_DIR.parent +USER_DATA_ROOT = (REPO_ROOT / "user_data").resolve() +VENV_PY = BACKEND_DIR / "venv" / "Scripts" / "python.exe" +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服12号") +# 固定测 bbbb.md,勿用 E2E_REL_FILE(PowerShell 里常残留 ccc/bbb) +REL_PATH = "user_data/bbbb.md" +# 固定内容与标记(不用 E2E_FILE_CONTENT / E2E_MARKER 环境变量,避免 PowerShell 残留旧值) +FILE_CONTENT = "# bbbb\n\ne2e bbbb permission marker\n" +E2E_MARKER = "e2e bbbb permission marker" + + +def _restart_celery() -> None: + ps = ( + "Get-CimInstance Win32_Process | " + "Where-Object { $_.CommandLine -match 'celery_app' } | " + "ForEach-Object { Stop-Process -Id $_.ProcessId -Force -ErrorAction SilentlyContinue }" + ) + subprocess.run( + 
["powershell", "-NoProfile", "-Command", ps], + cwd=str(BACKEND_DIR), + capture_output=True, + text=True, + ) + time.sleep(2) + if not VENV_PY.is_file(): + print("未找到 venv Python,跳过启动 Celery", file=sys.stderr) + return + kw: dict = {"cwd": str(BACKEND_DIR), "stdout": subprocess.DEVNULL, "stderr": subprocess.STDOUT} + if sys.platform == "win32": + kw["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined] + popen_env = os.environ.copy() + if os.environ.get("E2E_LLM_TOOL_CHOICE", "auto").strip().lower() not in ( + "0", + "false", + "no", + "auto", + ): + popen_env["LLM_TOOL_CHOICE"] = "required" + subprocess.Popen( + [ + str(VENV_PY), + "-m", + "celery", + "-A", + "app.core.celery_app", + "worker", + "--loglevel=info", + "--pool=threads", + "--concurrency=8", + ], + env=popen_env, + **kw, + ) + print("已启动 Celery,等待就绪…") + time.sleep(4) + + +def _health_check(client) -> bool: + r = client.get(f"{API_BASE.rstrip('/')}/health", timeout=15.0) + r.raise_for_status() + data = r.json() + print("\n--- GET /health ---") + print(json.dumps(data, ensure_ascii=False, indent=2)[:2500]) + checks = data.get("checks") + if not checks: + print( + "\n[INFO] /health 无 checks 字段(可能 API 未更新);请以 Worker 日志「内置工具就绪」为准。", + file=sys.stderr, + ) + return True + ok = checks.get("builtin_tools_ready") and checks.get("file_agent_core_ready") + if not ok: + print( + "\n[WARN] API 进程 builtin_tools 未完全就绪。请同步重启 API 与 Celery。", + file=sys.stderr, + ) + return bool(ok) + + +def _path_allowed(abs_file: Path) -> tuple[bool, str]: + """file_write 合法路径:须在仓库根下,且本次要求落在 user_data 下。""" + try: + f = abs_file.resolve() + repo = REPO_ROOT.resolve() + except OSError as e: + return False, str(e) + try: + f.relative_to(repo) + except ValueError: + return False, f"文件不在仓库根内: {f} vs root {repo}" + try: + f.relative_to(USER_DATA_ROOT) + except ValueError: + return False, f"文件不在 user_data 下: {f} vs {USER_DATA_ROOT}" + return True, "" + + +def _scan_file_write_in_node_results(od: dict) -> 
def _scan_file_write_in_node_results(od: dict) -> None:
    """Print node ids whose output carries a plausible file_write result (success/file_path)."""
    results = od.get("node_results") or {}
    matches = []
    for node_id, payload in results.items():
        if not isinstance(payload, dict):
            continue
        raw = payload.get("output")
        # Non-string outputs are serialized so the substring probe works uniformly.
        text = raw if isinstance(raw, str) else json.dumps(raw, ensure_ascii=False)
        if "file_write" in text and ("success" in text.lower() or "file_path" in text):
            matches.append((node_id, text[:400]))
    print("\n--- node_results 中含 file_write 线索的节点 ---")
    if not matches:
        print("(未扫到明显 JSON;可能结果只在 LLM 正文或日志中)")
    for node_id, fragment in matches:
        print(f"  {node_id}: {fragment}...")
的可读说明里(含 success 与 file_path)。" + f"最后一行仍输出单行 JSON:intent、reply、user_profile。" + ) + print(f"\nagent={agent.id} ({AGENT_NAME}) user_id={uid}") + print(f"目标文件(解析后): {abs_win}") + print(f"权限校验: 须位于 {USER_DATA_ROOT}") + + def poll(client: httpx.Client, eid: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{eid}", headers=headers) + r.raise_for_status() + d = r.json() + st = d.get("status") + if st == "completed": + return d + if st == "failed": + print("failed:", d.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1.5) + raise TimeoutError("超时") + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + print(f"\nexecution={eid}") + out = poll(client, eid) + od = out.get("output_data") or {} + result = od.get("result", od) + print("\n--- API output_data.result (截断 1500 字符) ---\n") + print(str(result)[:1500]) + _scan_file_write_in_node_results(od) + + abs_file = Path(abs_win).resolve() + allowed, reason = _path_allowed(abs_file) + if not allowed: + print(f"\n[FAIL] 路径权限校验: {reason}", file=sys.stderr) + return 3 + + if not abs_file.is_file(): + print(f"\n[FAIL] 磁盘未找到: {abs_file}", file=sys.stderr) + print("若 completed:可能未触发 file_write;查 Celery「执行工具 file_write」。", file=sys.stderr) + return 2 + + body = abs_file.read_text(encoding="utf-8", errors="replace") + print(f"\n[OK] 文件存在且路径合法: {abs_file}\n--- 内容 ---\n{body}\n---") + if E2E_MARKER and E2E_MARKER not in body and FILE_CONTENT.strip() and E2E_MARKER in FILE_CONTENT: + print(f"[WARN] 未在文件中发现标记「{E2E_MARKER}」", file=sys.stderr) + finally: + db.close() + + print("\n[OK] E2E bbbb.md + 路径权限 通过\n完成") + return 0 + + +if __name__ == "__main__": + raise 
SystemExit(main()) diff --git a/backend/scripts/e2e_zhini12_file_test.py b/backend/scripts/e2e_zhini12_file_test.py new file mode 100644 index 0000000..a049d98 --- /dev/null +++ b/backend/scripts/e2e_zhini12_file_test.py @@ -0,0 +1,142 @@ +""" +知你客服12号:一轮对话触发 file_write(相对路径 user_data/e2e_12.md)。 +可选重启 Celery:E2E_RESTART_CELERY=1(默认 1)。 + +用法: cd backend && .\\venv\\Scripts\\python.exe scripts/e2e_zhini12_file_test.py +""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +VENV_PY = BACKEND_DIR / "venv" / "Scripts" / "python.exe" +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服12号") +REL_PATH = os.environ.get("E2E_REL_FILE", "user_data/e2e_12.md") +FILE_CONTENT = os.environ.get("E2E_FILE_CONTENT", "e2e zhini12 ok\n") + + +def _restart_celery() -> None: + ps = ( + "Get-CimInstance Win32_Process | " + "Where-Object { $_.CommandLine -match 'celery_app' } | " + "ForEach-Object { Stop-Process -Id $_.ProcessId -Force -ErrorAction SilentlyContinue }" + ) + subprocess.run( + ["powershell", "-NoProfile", "-Command", ps], + cwd=str(BACKEND_DIR), + capture_output=True, + text=True, + ) + time.sleep(2) + if not VENV_PY.is_file(): + print("未找到 venv Python,跳过启动 Celery", file=sys.stderr) + return + kw: dict = {"cwd": str(BACKEND_DIR), "stdout": subprocess.DEVNULL, "stderr": subprocess.STDOUT} + if sys.platform == "win32": + kw["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined] + subprocess.Popen( + [ + str(VENV_PY), + "-m", + "celery", + "-A", + "app.core.celery_app", + "worker", + "--loglevel=info", + "--pool=threads", + "--concurrency=8", + ], + **kw, + ) + print("已启动 Celery,等待就绪…") + time.sleep(4) + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + if os.environ.get("E2E_RESTART_CELERY", 
"1").strip().lower() not in ("0", "false", "no"): + _restart_celery() + + import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == AGENT_NAME).first() + if not agent: + print(f"未找到「{AGENT_NAME}」", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无用户", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"e2e12_{uuid.uuid4().hex[:10]}" + q = ( + f"请调用 file_write:file_path 用相对路径 {REL_PATH},content 用 {json.dumps(FILE_CONTENT, ensure_ascii=False)}," + "mode 用 w。完成后在 reply 里写出 file_write 返回的原始 JSON 字符串(不要编造)。" + "最终只输出一行 JSON:intent、reply、user_profile。" + ) + print(f"agent={agent.id} user_id={uid}\nQ: {q[:200]}...") + + def poll(client: httpx.Client, eid: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{eid}", headers=headers) + r.raise_for_status() + d = r.json() + st = d.get("status") + if st == "completed": + return d + if st == "failed": + print("failed:", d.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1.5) + raise TimeoutError("超时") + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + print(f"execution={eid}") + out = poll(client, eid) + od = out.get("output_data") or {} + result = od.get("result", od) + print("--- API result (截断) ---") + print(str(result)[:1200]) + + 
root = BACKEND_DIR.parent + abs_file = (root / REL_PATH.replace("/", os.sep)).resolve() + if abs_file.is_file(): + body = abs_file.read_text(encoding="utf-8", errors="replace") + print(f"\n磁盘文件存在: {abs_file}\n内容:\n{body!r}") + else: + print(f"\n磁盘未找到: {abs_file}", file=sys.stderr) + return 2 + finally: + db.close() + print("\n完成") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/e2e_zhini7_two_rounds.py b/backend/scripts/e2e_zhini7_two_rounds.py new file mode 100644 index 0000000..badad4c --- /dev/null +++ b/backend/scripts/e2e_zhini7_two_rounds.py @@ -0,0 +1,171 @@ +""" +重启 Celery Worker(Windows),并对「知你客服7号」做两轮 API 测试: +1)我的名字叫小七 2)我叫什么名字? +需:本机 API 已监听(默认 8037)、Redis、LLM 配置可用。 +""" +from __future__ import annotations + +import json +import os +import subprocess +import sys +import time +import uuid +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] +VENV_PY = BACKEND_DIR / "venv" / "Scripts" / "python.exe" +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") + + +def _restart_celery() -> None: + ps = ( + "Get-CimInstance Win32_Process | " + "Where-Object { $_.CommandLine -match 'celery_app' } | " + "ForEach-Object { Stop-Process -Id $_.ProcessId -Force -ErrorAction SilentlyContinue }" + ) + subprocess.run( + ["powershell", "-NoProfile", "-Command", ps], + cwd=str(BACKEND_DIR), + capture_output=True, + text=True, + ) + time.sleep(2) + if not VENV_PY.is_file(): + print("未找到 venv Python,跳过启动 Celery", file=sys.stderr) + return + popen_kw: dict = { + "cwd": str(BACKEND_DIR), + "stdout": subprocess.DEVNULL, + "stderr": subprocess.STDOUT, + } + if sys.platform == "win32": + popen_kw["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined] + subprocess.Popen( + [ + str(VENV_PY), + "-m", + "celery", + "-A", + "app.core.celery_app", + "worker", + "--loglevel=info", + "--pool=threads", + "--concurrency=8", + ], + **popen_kw, + ) + print("已启动新 Celery 
Worker(线程池),等待就绪…") + time.sleep(4) + + +def _touch_api_reload() -> None: + """若 uvicorn 带 --reload,触发重载。""" + main_py = BACKEND_DIR / "app" / "main.py" + if main_py.is_file(): + main_py.touch() + print("已 touch app/main.py 以触发 API 热重载(若启用 --reload)") + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + + _restart_celery() + _touch_api_reload() + + import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == "知你客服7号").first() + if not agent: + print("数据库中未找到名为「知你客服7号」的 Agent", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无可用用户,无法签发 JWT", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"e2e_xiaoqi_{uuid.uuid4().hex[:10]}" + print(f"agent_id={agent.id} owner={user.username} user_id={uid}") + print(f"请确认工作流 Cache 键为 user_memory_{{{{user_id}}}},请求中已带 user_id={uid}\n") + + def poll(client: httpx.Client, execution_id: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{execution_id}", headers=headers) + r.raise_for_status() + data = r.json() + st = data.get("status") + if st == "completed": + return data + if st == "failed": + print("error:", data.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1) + raise TimeoutError("等待执行完成超时") + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + r = client.post( + "/api/v1/executions", + json={ + "agent_id": str(agent.id), + "input_data": {"query": "我的名字叫小七", "user_id": uid}, + }, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, 
file=sys.stderr) + r.raise_for_status() + eid1 = r.json()["id"] + print("第一轮 execution_id:", eid1) + out1 = poll(client, eid1) + print("第一轮 output_data:", json.dumps(out1.get("output_data"), ensure_ascii=False)[:1200]) + + r = client.post( + "/api/v1/executions", + json={ + "agent_id": str(agent.id), + "input_data": {"query": "我叫什么名字?", "user_id": uid}, + }, + headers=headers, + ) + r.raise_for_status() + eid2 = r.json()["id"] + print("\n第二轮 execution_id:", eid2) + out2 = poll(client, eid2) + print("第二轮 output_data:", json.dumps(out2.get("output_data"), ensure_ascii=False)[:1200]) + + # Redis 键检查 + try: + from app.core.config import settings + import redis as redis_lib + + url = getattr(settings, "REDIS_URL", None) or "redis://localhost:6379/0" + rc = redis_lib.from_url(url, decode_responses=True) + key = f"user_memory_{uid}" + raw = rc.get(key) + print(f"\nRedis 键 {key}:", "存在" if raw else "不存在") + if raw: + try: + mem = json.loads(raw) + print("memory.user_profile:", mem.get("user_profile")) + except Exception as ex: + print("解析 Redis 值失败:", ex) + except Exception as ex: + print("Redis 检查跳过:", ex) + + finally: + db.close() + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/e2e_zhini9_test.py b/backend/scripts/e2e_zhini9_test.py new file mode 100644 index 0000000..1d50ee1 --- /dev/null +++ b/backend/scripts/e2e_zhini9_test.py @@ -0,0 +1,116 @@ +""" +对「知你客服9号」做多轮 API 测试(不默认重启 Celery,避免打断本机 Worker)。 +轮次:①自我介绍姓名 ②陈述偏好(供摘要/关键词召回)③闲聊 ④问姓名 + 问偏好 + +用法: + cd backend && .\\venv\\Scripts\\python.exe scripts/e2e_zhini9_test.py +环境变量: API_BASE, E2E_AGENT_NAME(默认 知你客服9号) +""" +from __future__ import annotations + +import json +import os +import sys +import time +import uuid + +BACKEND_DIR = __file__.rsplit("scripts", 1)[0] +API_BASE = os.environ.get("API_BASE", "http://127.0.0.1:8037") +AGENT_NAME = os.environ.get("E2E_AGENT_NAME", "知你客服9号") + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, BACKEND_DIR) + + 
import httpx + from app.core.database import SessionLocal + from app.core.security import create_access_token + from app.models.agent import Agent + from app.models.user import User + + db = SessionLocal() + try: + agent = db.query(Agent).filter(Agent.name == AGENT_NAME).first() + if not agent: + print(f"数据库中未找到「{AGENT_NAME}」", file=sys.stderr) + return 1 + owner = db.query(User).filter(User.id == agent.user_id).first() + user = owner or db.query(User).first() + if not user: + print("无可用用户", file=sys.stderr) + return 1 + token = create_access_token(data={"sub": user.id, "username": user.username}) + headers = {"Authorization": f"Bearer {token}"} + uid = f"e2e_z9_{uuid.uuid4().hex[:10]}" + print(f"agent_id={agent.id} name={agent.name} user_id={uid}\n") + + def poll(client: httpx.Client, execution_id: str, timeout: float = 300.0) -> dict: + t0 = time.time() + while time.time() - t0 < timeout: + r = client.get(f"/api/v1/executions/{execution_id}", headers=headers) + r.raise_for_status() + data = r.json() + st = data.get("status") + if st == "completed": + return data + if st == "failed": + print("error:", data.get("error_message"), file=sys.stderr) + raise RuntimeError("执行失败") + time.sleep(1) + raise TimeoutError("超时") + + def extract_reply(out: dict) -> str: + od = out.get("output_data") or {} + if isinstance(od, dict): + r = od.get("result") + if isinstance(r, str): + return r[:500] + return json.dumps(od, ensure_ascii=False)[:500] + + rounds = [ + "我的名字叫阿九", + "记住:我最爱吃火锅,不喜欢甜食。", + "今天天气不错吧?", + "我叫什么名字?你还记得我喜欢吃什么吗?", + ] + + with httpx.Client(base_url=API_BASE, timeout=300.0) as client: + for i, q in enumerate(rounds, 1): + r = client.post( + "/api/v1/executions", + json={"agent_id": str(agent.id), "input_data": {"query": q, "user_id": uid}}, + headers=headers, + ) + if r.status_code >= 400: + print(r.text, file=sys.stderr) + r.raise_for_status() + eid = r.json()["id"] + out = poll(client, eid) + print(f"--- 第{i}轮 ---\nQ: {q}\nA: {extract_reply(out)}\n") + + try: 
+ from app.core.config import settings + import redis as redis_lib + + url = getattr(settings, "REDIS_URL", None) or "redis://localhost:6379/0" + rc = redis_lib.from_url(url, decode_responses=True) + key = f"user_memory_{uid}" + raw = rc.get(key) + print(f"Redis {key}:", "有" if raw else "无") + if raw: + mem = json.loads(raw) + print("conversation_summary 前120字:", str(mem.get("conversation_summary", ""))[:120]) + print("user_profile:", mem.get("user_profile")) + print("history 条数:", len(mem.get("conversation_history") or [])) + except Exception as ex: + print("Redis 检查:", ex) + + finally: + db.close() + print("完成") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/patch_code_build_memory_value_no_import.py b/backend/scripts/patch_code_build_memory_value_no_import.py new file mode 100644 index 0000000..44da371 --- /dev/null +++ b/backend/scripts/patch_code_build_memory_value_no_import.py @@ -0,0 +1,57 @@ +""" +代码节点 code-build-memory-value 首行「from datetime import datetime」在受限 exec 下会触发 __import__ 不可用。 +引擎已在 __builtins__ 中注入 datetime 类,去掉该行即可。 + +默认修补名为「知你客服12号」的 Agent;也可用环境变量 PATCH_AGENT_NAME=ALL 修补所有含该节点的 Agent。 +""" +from __future__ import annotations + +import os +import sys + +BACKEND = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, BACKEND) + +from sqlalchemy.orm.attributes import flag_modified + +from app.core.database import SessionLocal +from app.models.agent import Agent + +OLD_PREFIX = "from datetime import datetime\n" + + +def main() -> int: + name = os.environ.get("PATCH_AGENT_NAME", "知你客服12号") + db = SessionLocal() + try: + q = db.query(Agent) + if name.upper() != "ALL": + q = q.filter(Agent.name == name) + agents = q.all() + n_patched = 0 + for a in agents: + wf = a.workflow_config + if not wf or "nodes" not in wf: + continue + touched = False + for n in wf.get("nodes") or []: + if n.get("id") != "code-build-memory-value": + continue + c = (n.get("data") or {}).get("code") 
or "" + if c.startswith(OLD_PREFIX): + n.setdefault("data", {})["code"] = c[len(OLD_PREFIX) :] + touched = True + if touched: + a.workflow_config = wf + flag_modified(a, "workflow_config") + n_patched += 1 + print("已修补:", a.name, a.id) + db.commit() + print("合计修补 Agent 数:", n_patched) + return 0 + finally: + db.close() + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/patch_zhini_code_build_context_scope.py b/backend/scripts/patch_zhini_code_build_context_scope.py new file mode 100644 index 0000000..627d6cf --- /dev/null +++ b/backend/scripts/patch_zhini_code_build_context_scope.py @@ -0,0 +1,51 @@ +"""修复 code-build-context 中列表推导式 r 与前面 genexp 的 r 作用域冲突(就地更新数据库 Agent)。""" +from __future__ import annotations + +import os +import sys + +BACKEND = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, BACKEND) + +from sqlalchemy.orm.attributes import flag_modified + +from app.core.database import SessionLocal +from app.models.agent import Agent + +OLD_VEC = "vec_str = '\\n'.join((r.get('text') or r.get('content') or '') for r in right)" +NEW_VEC = "vec_str = '\\n'.join((rec.get('text') or rec.get('content') or '') for rec in right)" +OLD_KW = 'kw_lines = [f"{r}: {t}" for _, r, t in scored[:6]]' +NEW_KW = 'kw_lines = [f"{role}: {text}" for _, role, text in scored[:6]]' + + +def main() -> int: + name = os.environ.get("PATCH_AGENT_NAME", "知你客服11号") + db = SessionLocal() + try: + a = db.query(Agent).filter(Agent.name == name).first() + if not a: + print("未找到", name, file=sys.stderr) + return 1 + wf = a.workflow_config + for n in wf.get("nodes", []): + if n.get("id") != "code-build-context": + continue + c = n.get("data", {}).get("code", "") + c2 = c.replace(OLD_VEC, NEW_VEC).replace(OLD_KW, NEW_KW) + if c2 == c: + print("无需替换(可能已修复或内容不同)") + return 0 + n.setdefault("data", {})["code"] = c2 + a.workflow_config = wf + flag_modified(a, "workflow_config") + db.commit() + print("已更新", name, "code-build-context") + 
return 0 + print("未找到 code-build-context 节点", file=sys.stderr) + return 1 + finally: + db.close() + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/patch_zhini_kefu_12_prompt_tools.py b/backend/scripts/patch_zhini_kefu_12_prompt_tools.py new file mode 100644 index 0000000..ada61ec --- /dev/null +++ b/backend/scripts/patch_zhini_kefu_12_prompt_tools.py @@ -0,0 +1,91 @@ +"""就地更新「知你客服12号」:启用 system_info + 修订 LLM 提示词(工作区路径、工具反馈)。""" +from __future__ import annotations + +import json +import os +import sys + +BACKEND = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, BACKEND) + +from sqlalchemy.orm.attributes import flag_modified + +from app.core.database import SessionLocal +from app.models.agent import Agent + +# 与 create_zhini_kefu_12.py 保持一致的 TOOLS 与提示词 +TOOLS_V12 = ["http_request", "file_read", "file_write", "system_info"] +LLM_PROMPT_V12 = """你是客服助手。根据用户输入、用户画像、助手称呼、远期摘要、检索片段与最近对话生成回复。 + +【工具 http_request】 +- 用户给出 http(s) 链接且需要抓网页/API 时,先调用 http_request:参数 url 为完整链接,method 必填(一般为 GET)。 +- 根据返回 JSON 中的 body 字段提炼要点;非 URL 问答不要无故调用。 + +【工具 system_info(工作区路径)】 +- 用户问「工作区路径」「能访问哪个目录」「file 根目录在哪」时,**必须调用 system_info**,用返回 JSON 里的 **local_file_workspace_root** 原样告知用户(不要用「临时目录」「无法显示」等推脱)。 + +【工具 file_read / file_write(本地文件)】 +- 仅当用户明确要「读文件」「写入某路径」「保存到本地文件」等时使用。 +- file_read:参数 file_path 可为**相对工作区根的相对路径**,或**落在工作区根之下的绝对路径**(Windows 如 `D:\\...`,Linux 如 `/home/...`),二者等价,由后端校验。 +- file_write:参数 file_path、content;mode 用 w 覆盖或 a 追加。写入前确认路径有意、避免覆盖重要文件;不要写入密钥、令牌。 +- **禁止**以「不能访问 D: 盘」「只能相对路径」「工具看不到绝对路径」等理由拒绝用户:只要用户给的绝对路径以 `system_info` 返回的 `local_file_workspace_root` 为前缀(同一盘符、规范化后在其子路径下),就应**直接调用 file_write**,例如根为 `D:\\aaa\\aiagent` 时,`D:\\aaa\\aiagent\\user_data\\xxx.md` **合法**,可优先用用户原文路径或简写为相对路径 `user_data/xxx.md`。 +- 路径必须落在平台允许的工作区内,否则会报错;不要尝试访问工作区外的路径。 +- **禁止**假设工作区是 `/workspace` 或未经验证的目录;工作区根**只信** `local_file_workspace_root`。 +- **每次调用 file_write / file_read 后,必须在最终 reply 中说明工具返回结果**:成功则写明路径与要点;失败则引用返回 
JSON 中的 error 字段,不得假装已成功。 +- **严禁编造工具返回**:reply 中若引用 file_write/file_read/system_info 的 JSON,必须与工具实际返回字符串一致(可原样粘贴)。禁止臆造路径(例如 /tmp/...、/workspace/...)或与当前系统不符的路径;若未调用工具,禁止在 reply 里写伪造的 JSON。 + +【称呼规则】(与 10/11 一致) +- user_profile.name 表示用户昵称;assistant_display_name 表示用户为你起的称呼。 +- 用户问「你叫什么」时用 assistant_display_name(若有);勿把用户姓名写入 assistant_display_name。 + +【最终输出格式(强制)】 +- 最后一条回复必须是**一行合法 JSON**,无 markdown、无代码围栏;含 intent、reply、user_profile(对象)。 + +上下文: +用户输入:{{user_input}} +用户画像:{{memory.user_profile}} +助手对外称呼:{{memory.assistant_display_name}} +远期摘要:{{memory.conversation_summary}} +相关历史(检索):{{memory.relevant_from_retrieval}} +最近几轮:{{memory.recent_turns}} +""" + + +def main() -> int: + name = os.environ.get("PATCH_AGENT_NAME", "知你客服12号") + db = SessionLocal() + try: + a = db.query(Agent).filter(Agent.name == name).first() + if not a: + print("未找到", name, file=sys.stderr) + return 1 + wf = dict(a.workflow_config) if a.workflow_config else {} + nodes = list(wf.get("nodes") or []) + done = False + for i, n in enumerate(nodes): + if n.get("id") != "llm-unified": + continue + d = dict(n.get("data") or {}) + d["prompt"] = LLM_PROMPT_V12 + d["enable_tools"] = True + d["tools"] = list(TOOLS_V12) + d["selected_tools"] = list(TOOLS_V12) + nodes[i] = {**n, "data": d} + done = True + break + if not done: + print("未找到 llm-unified", file=sys.stderr) + return 1 + wf["nodes"] = nodes + a.workflow_config = wf + flag_modified(a, "workflow_config") + db.commit() + print("已更新", name, "llm-unified: tools=", TOOLS_V12) + print(json.dumps({"name": name, "id": str(a.id)}, ensure_ascii=False)) + return 0 + finally: + db.close() + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/backend/scripts/restart_api_worker.ps1 b/backend/scripts/restart_api_worker.ps1 new file mode 100644 index 0000000..19eb679 --- /dev/null +++ b/backend/scripts/restart_api_worker.ps1 @@ -0,0 +1,41 @@ +$ErrorActionPreference = "SilentlyContinue" +$backend = "D:\aaa\aiagent\backend" + +Get-CimInstance 
Win32_Process | Where-Object { + $_.CommandLine -and $_.CommandLine -match "celery" -and $_.CommandLine -match "celery_app" +} | ForEach-Object { + Write-Host "Stop Celery PID $($_.ProcessId)" + Stop-Process -Id $_.ProcessId -Force +} + +Get-CimInstance Win32_Process | Where-Object { + $_.CommandLine -and $_.CommandLine -match "uvicorn" -and $_.CommandLine -match "app.main:app" +} | ForEach-Object { + Write-Host "Stop Uvicorn PID $($_.ProcessId)" + Stop-Process -Id $_.ProcessId -Force +} + +Start-Sleep -Seconds 2 + +$py = Join-Path $backend "venv\Scripts\python.exe" +Write-Host "Start Uvicorn :8037 ..." +Start-Process -FilePath $py -ArgumentList @( + "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8037", "--reload" +) -WorkingDirectory $backend -WindowStyle Minimized + +Start-Sleep -Seconds 2 + +Write-Host "Start Celery worker ..." +Start-Process -FilePath $py -ArgumentList @( + "-m", "celery", "-A", "app.core.celery_app", "worker", + "--loglevel=info", "--pool=threads", "--concurrency=8" +) -WorkingDirectory $backend -WindowStyle Minimized + +Start-Sleep -Seconds 3 +try { + $r = Invoke-WebRequest -Uri "http://127.0.0.1:8037/health" -UseBasicParsing -TimeoutSec 15 + Write-Host "health: $($r.Content)" +} catch { + Write-Host "health check failed: $($_.Exception.Message)" +} +Write-Host "Done." 
diff --git a/backend/scripts/test_write_user_data_aaa_md.py b/backend/scripts/test_write_user_data_aaa_md.py new file mode 100644 index 0000000..559ad62 --- /dev/null +++ b/backend/scripts/test_write_user_data_aaa_md.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +""" +测试在 D:\\aaa\\aiagent\\user_data 下创建 aaa.md(与线上一致的 file_write_tool)。 + +用法(在 backend 目录): + .\\venv\\Scripts\\python.exe scripts\\test_write_user_data_aaa_md.py + +可选环境变量: + TEST_MD_CONTENT 写入内容,默认一行时间戳 + TEST_USE_ABSPATH 设为 1 时使用绝对路径 D:\\aaa\\aiagent\\user_data\\aaa.md,否则用相对路径 user_data/aaa.md +""" +from __future__ import annotations + +import asyncio +import os +import sys +from datetime import datetime, timezone +from pathlib import Path + +BACKEND_DIR = Path(__file__).resolve().parents[1] + + +def main() -> int: + os.chdir(BACKEND_DIR) + sys.path.insert(0, str(BACKEND_DIR)) + + from app.services.builtin_tools import _local_file_workspace_root, file_write_tool + + root = _local_file_workspace_root() + content = os.environ.get( + "TEST_MD_CONTENT", + f"# aaa\\n\\nwritten by test_write_user_data_aaa_md.py at {datetime.now(timezone.utc).isoformat()}\\n", + ).replace("\\n", "\n") + + if os.environ.get("TEST_USE_ABSPATH", "").strip() in ("1", "true", "yes"): + target = root / "user_data" / "aaa.md" + file_path_arg = str(target) + else: + file_path_arg = "user_data/aaa.md" + + print("workspace root:", root) + print("file_path:", file_path_arg) + print("content bytes (utf-8):", len(content.encode("utf-8"))) + + raw = asyncio.run(file_write_tool(file_path_arg, content, "w")) + print("tool return:", raw) + + resolved = (root / "user_data" / "aaa.md").resolve() + if not resolved.is_file(): + print("FAIL: file missing:", resolved, file=sys.stderr) + return 2 + on_disk = resolved.read_text(encoding="utf-8", errors="replace") + print("OK: on disk", resolved) + print("--- file head ---") + print(on_disk[:500]) + print("---") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git 
"""Push the LLM prompt defined in create_zhini_kefu_7.py into agent 知你客服7号 via the HTTP API.

Fixes over the first version: the source file is read with a context manager
(no leaked handle), a missing LLM_PROMPT block or missing llm-unified node
aborts with a clear message instead of an opaque AttributeError / silent no-op,
and HTTP errors on login/fetch raise instead of propagating bad JSON.
"""
import re
import sys

import requests

BASE = "http://127.0.0.1:8037"
AID = "688c2c41-dcd1-4285-b193-6bed00c485c2"

# Read the prompt out of the creation script so the two stay in sync.
with open("scripts/create_zhini_kefu_7.py", encoding="utf-8") as fh:
    text = fh.read()
m = re.search(r'LLM_PROMPT = """(.*?)"""', text, re.S)
if m is None:
    sys.exit("LLM_PROMPT block not found in scripts/create_zhini_kefu_7.py")
prompt = m.group(1).strip()

# Log in and fetch the agent's current workflow config.
r = requests.post(
    f"{BASE}/api/v1/auth/login",
    data={"username": "admin", "password": "123456"},
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    timeout=15,
)
r.raise_for_status()
h = {"Authorization": f"Bearer {r.json()['access_token']}", "Content-Type": "application/json"}
g = requests.get(f"{BASE}/api/v1/agents/{AID}", headers=h, timeout=30)
g.raise_for_status()
wf = g.json()["workflow_config"]

# Replace only the llm-unified node's prompt; everything else stays untouched.
for n in wf["nodes"]:
    if n.get("id") == "llm-unified":
        n["data"]["prompt"] = prompt
        break
else:
    sys.exit("llm-unified node not found in workflow_config")

up = requests.put(f"{BASE}/api/v1/agents/{AID}", headers=h, json={"workflow_config": wf}, timeout=60)
print(up.status_code, up.text[:300])
1.5.4(@vue-flow/core@1.48.2(vue@3.5.32(typescript@5.3.3)))(vue@3.5.32(typescript@5.3.3)) + axios: + specifier: ^1.6.2 + version: 1.14.0 + element-plus: + specifier: ^2.4.4 + version: 2.13.6(typescript@5.3.3)(vue@3.5.32(typescript@5.3.3)) + monaco-editor: + specifier: ^0.44.0 + version: 0.44.0 + pinia: + specifier: ^2.1.7 + version: 2.3.1(typescript@5.3.3)(vue@3.5.32(typescript@5.3.3)) + socket.io-client: + specifier: ^4.6.1 + version: 4.8.3 + vee-validate: + specifier: ^4.12.0 + version: 4.15.1(vue@3.5.32(typescript@5.3.3)) + vue: + specifier: ^3.4.0 + version: 3.5.32(typescript@5.3.3) + vue-router: + specifier: ^4.2.5 + version: 4.6.4(vue@3.5.32(typescript@5.3.3)) + yup: + specifier: ^1.3.3 + version: 1.7.1 + devDependencies: + '@types/node': + specifier: ^20.10.0 + version: 20.19.39 + '@vitejs/plugin-vue': + specifier: ^4.5.2 + version: 4.6.2(vite@5.4.21(@types/node@20.19.39))(vue@3.5.32(typescript@5.3.3)) + '@vue/eslint-config-prettier': + specifier: ^9.0.0 + version: 9.0.0(eslint@8.57.1)(prettier@3.8.1) + '@vue/eslint-config-typescript': + specifier: ^12.0.0 + version: 12.0.0(eslint-plugin-vue@9.33.0(eslint@8.57.1))(eslint@8.57.1)(typescript@5.3.3) + '@vue/tsconfig': + specifier: ^0.5.1 + version: 0.5.1 + eslint: + specifier: ^8.55.0 + version: 8.57.1 + eslint-plugin-vue: + specifier: ^9.19.2 + version: 9.33.0(eslint@8.57.1) + prettier: + specifier: ^3.1.1 + version: 3.8.1 + typescript: + specifier: ~5.3.3 + version: 5.3.3 + vite: + specifier: ^5.0.8 + version: 5.4.21(@types/node@20.19.39) + vue-tsc: + specifier: ^1.8.25 + version: 1.8.27(typescript@5.3.3) + +packages: + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + 
'@babel/parser@7.29.2': + resolution: {integrity: sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.29.0': + resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@ctrl/tinycolor@4.2.0': + resolution: {integrity: sha512-kzyuwOAQnXJNLS9PSyrk0CWk35nWJW/zl/6KvnTBMFK65gm7U1/Z5BqjxeapjZCIhQcM/DsrEmcbRwDyXyXK4A==} + engines: {node: '>=14'} + + '@element-plus/icons-vue@2.3.2': + resolution: {integrity: sha512-OzIuTaIfC8QXEPmJvB4Y4kw34rSXdCJzxcD1kFStBvr8bK6X1zQAYDo0CNMjojnfTqRQCJ0I7prlErcoRiET2A==} + peerDependencies: + vue: ^3.2.0 + + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} + engines: {node: '>=12'} + cpu: 
[x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.21.5': + resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.21.5': + resolution: 
{integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: 
sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/eslintrc@2.1.4': + resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@eslint/js@8.57.1': + resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@floating-ui/core@1.7.5': + resolution: {integrity: sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==} + + '@floating-ui/dom@1.7.6': + resolution: {integrity: sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==} + + '@floating-ui/utils@0.2.11': + resolution: {integrity: sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==} + + '@humanwhocodes/config-array@0.13.0': + resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} + engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@2.0.3': + resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: 
{node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@pkgr/core@0.2.9': + resolution: {integrity: sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==} + engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} + + '@rollup/rollup-android-arm-eabi@4.60.1': + resolution: {integrity: sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.60.1': + resolution: {integrity: sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.60.1': + resolution: {integrity: sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.60.1': + resolution: {integrity: sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.60.1': + resolution: {integrity: sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.60.1': + resolution: {integrity: sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.60.1': + resolution: {integrity: sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==} + cpu: [arm] + os: [linux] + libc: [glibc] + + 
'@rollup/rollup-linux-arm-musleabihf@4.60.1': + resolution: {integrity: sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==} + cpu: [arm] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-arm64-gnu@4.60.1': + resolution: {integrity: sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm64-musl@4.60.1': + resolution: {integrity: sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-loong64-gnu@4.60.1': + resolution: {integrity: sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==} + cpu: [loong64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-loong64-musl@4.60.1': + resolution: {integrity: sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==} + cpu: [loong64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-ppc64-gnu@4.60.1': + resolution: {integrity: sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-ppc64-musl@4.60.1': + resolution: {integrity: sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==} + cpu: [ppc64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-riscv64-gnu@4.60.1': + resolution: {integrity: sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-riscv64-musl@4.60.1': + resolution: {integrity: sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==} + cpu: [riscv64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-s390x-gnu@4.60.1': + resolution: 
{integrity: sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-gnu@4.60.1': + resolution: {integrity: sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-musl@4.60.1': + resolution: {integrity: sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@rollup/rollup-openbsd-x64@4.60.1': + resolution: {integrity: sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.60.1': + resolution: {integrity: sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.60.1': + resolution: {integrity: sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.60.1': + resolution: {integrity: sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.60.1': + resolution: {integrity: sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.60.1': + resolution: {integrity: sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==} + cpu: [x64] + os: [win32] + + '@socket.io/component-emitter@3.1.2': + resolution: {integrity: sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==} + + '@sxzz/popperjs-es@2.11.8': + resolution: {integrity: 
sha512-wOwESXvvED3S8xBmcPWHs2dUuzrE4XiZeFu7e1hROIJkm02a49N120pmOXxY33sBb6hArItm5W5tcg1cBtV+HQ==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@types/lodash-es@4.17.12': + resolution: {integrity: sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==} + + '@types/lodash@4.17.24': + resolution: {integrity: sha512-gIW7lQLZbue7lRSWEFql49QJJWThrTFFeIMJdp3eH4tKoxm1OvEPg02rm4wCCSHS0cL3/Fizimb35b7k8atwsQ==} + + '@types/node@20.19.39': + resolution: {integrity: sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==} + + '@types/semver@7.7.1': + resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==} + + '@types/web-bluetooth@0.0.20': + resolution: {integrity: sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==} + + '@typescript-eslint/eslint-plugin@6.21.0': + resolution: {integrity: sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + '@typescript-eslint/parser': ^6.0.0 || ^6.0.0-alpha + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@6.21.0': + resolution: {integrity: sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@6.21.0': + resolution: {integrity: 
sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@typescript-eslint/type-utils@6.21.0': + resolution: {integrity: sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@6.21.0': + resolution: {integrity: sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@typescript-eslint/typescript-estree@6.21.0': + resolution: {integrity: sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@6.21.0': + resolution: {integrity: sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + + '@typescript-eslint/visitor-keys@6.21.0': + resolution: {integrity: sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==} + engines: {node: ^16.0.0 || >=18.0.0} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + '@vitejs/plugin-vue@4.6.2': + resolution: {integrity: sha512-kqf7SGFoG+80aZG6Pf+gsZIVvGSCKE98JbiWqcCV9cThtg91Jav0yvYFC9Zb+jKetNGF6ZKeoaxgZfND21fWKw==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + vite: ^4.0.0 || ^5.0.0 + vue: ^3.2.25 + + '@volar/language-core@1.11.1': + resolution: {integrity: sha512-dOcNn3i9GgZAcJt43wuaEykSluAuOkQgzni1cuxLxTV0nJKanQztp7FxyswdRILaKH+P2XZMPRp2S4MV/pElCw==} + 
+ '@volar/source-map@1.11.1': + resolution: {integrity: sha512-hJnOnwZ4+WT5iupLRnuzbULZ42L7BWWPMmruzwtLhJfpDVoZLjNBxHDi2sY2bgZXCKlpU5XcsMFoYrsQmPhfZg==} + + '@volar/typescript@1.11.1': + resolution: {integrity: sha512-iU+t2mas/4lYierSnoFOeRFQUhAEMgsFuQxoxvwn5EdQopw43j+J27a4lt9LMInx1gLJBC6qL14WYGlgymaSMQ==} + + '@vue-flow/background@1.3.2': + resolution: {integrity: sha512-eJPhDcLj1wEo45bBoqTXw1uhl0yK2RaQGnEINqvvBsAFKh/camHJd5NPmOdS1w+M9lggc9igUewxaEd3iCQX2w==} + peerDependencies: + '@vue-flow/core': ^1.23.0 + vue: ^3.3.0 + + '@vue-flow/controls@1.1.3': + resolution: {integrity: sha512-XCf+G+jCvaWURdFlZmOjifZGw3XMhN5hHlfMGkWh9xot+9nH9gdTZtn+ldIJKtarg3B21iyHU8JjKDhYcB6JMw==} + peerDependencies: + '@vue-flow/core': ^1.23.0 + vue: ^3.3.0 + + '@vue-flow/core@1.48.2': + resolution: {integrity: sha512-raxhgKWE+G/mcEvXJjGFUDYW9rAI3GOtiHR3ZkNpwBWuIaCC1EYiBmKGwJOoNzVFgwO7COgErnK7i08i287AFA==} + peerDependencies: + vue: ^3.3.0 + + '@vue-flow/minimap@1.5.4': + resolution: {integrity: sha512-l4C+XTAXnRxsRpUdN7cAVFBennC1sVRzq4bDSpVK+ag7tdMczAnhFYGgbLkUw3v3sY6gokyWwMl8CDonp8eB2g==} + peerDependencies: + '@vue-flow/core': ^1.23.0 + vue: ^3.3.0 + + '@vue/compiler-core@3.5.32': + resolution: {integrity: sha512-4x74Tbtqnda8s/NSD6e1Dr5p1c8HdMU5RWSjMSUzb8RTcUQqevDCxVAitcLBKT+ie3o0Dl9crc/S/opJM7qBGQ==} + + '@vue/compiler-dom@3.5.32': + resolution: {integrity: sha512-ybHAu70NtiEI1fvAUz3oXZqkUYEe5J98GjMDpTGl5iHb0T15wQYLR4wE3h9xfuTNA+Cm2f4czfe8B4s+CCH57Q==} + + '@vue/compiler-sfc@3.5.32': + resolution: {integrity: sha512-8UYUYo71cP/0YHMO814TRZlPuUUw3oifHuMR7Wp9SNoRSrxRQnhMLNlCeaODNn6kNTJsjFoQ/kqIj4qGvya4Xg==} + + '@vue/compiler-ssr@3.5.32': + resolution: {integrity: sha512-Gp4gTs22T3DgRotZ8aA/6m2jMR+GMztvBXUBEUOYOcST+giyGWJ4WvFd7QLHBkzTxkfOt8IELKNdpzITLbA2rw==} + + '@vue/devtools-api@6.6.4': + resolution: {integrity: sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==} + + '@vue/devtools-api@7.7.9': + resolution: {integrity: 
sha512-kIE8wvwlcZ6TJTbNeU2HQNtaxLx3a84aotTITUuL/4bzfPxzajGBOoqjMhwZJ8L9qFYDU/lAYMEEm11dnZOD6g==} + + '@vue/devtools-kit@7.7.9': + resolution: {integrity: sha512-PyQ6odHSgiDVd4hnTP+aDk2X4gl2HmLDfiyEnn3/oV+ckFDuswRs4IbBT7vacMuGdwY/XemxBoh302ctbsptuA==} + + '@vue/devtools-shared@7.7.9': + resolution: {integrity: sha512-iWAb0v2WYf0QWmxCGy0seZNDPdO3Sp5+u78ORnyeonS6MT4PC7VPrryX2BpMJrwlDeaZ6BD4vP4XKjK0SZqaeA==} + + '@vue/eslint-config-prettier@9.0.0': + resolution: {integrity: sha512-z1ZIAAUS9pKzo/ANEfd2sO+v2IUalz7cM/cTLOZ7vRFOPk5/xuRKQteOu1DErFLAh/lYGXMVZ0IfYKlyInuDVg==} + peerDependencies: + eslint: '>= 8.0.0' + prettier: '>= 3.0.0' + + '@vue/eslint-config-typescript@12.0.0': + resolution: {integrity: sha512-StxLFet2Qe97T8+7L8pGlhYBBr8Eg05LPuTDVopQV6il+SK6qqom59BA/rcFipUef2jD8P2X44Vd8tMFytfvlg==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.2.0 || ^7.0.0 || ^8.0.0 + eslint-plugin-vue: ^9.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@vue/language-core@1.8.27': + resolution: {integrity: sha512-L8Kc27VdQserNaCUNiSFdDl9LWT24ly8Hpwf1ECy3aFb9m6bDhBGQYOujDm21N7EW3moKIOKEanQwe1q5BK+mA==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@vue/reactivity@3.5.32': + resolution: {integrity: sha512-/ORasxSGvZ6MN5gc+uE364SxFdJ0+WqVG0CENXaGW58TOCdrAW76WWaplDtECeS1qphvtBZtR+3/o1g1zL4xPQ==} + + '@vue/runtime-core@3.5.32': + resolution: {integrity: sha512-pDrXCejn4UpFDFmMd27AcJEbHaLemaE5o4pbb7sLk79SRIhc6/t34BQA7SGNgYtbMnvbF/HHOftYBgFJtUoJUQ==} + + '@vue/runtime-dom@3.5.32': + resolution: {integrity: sha512-1CDVv7tv/IV13V8Nip1k/aaObVbWqRlVCVezTwx3K07p7Vxossp5JU1dcPNhJk3w347gonIUT9jQOGutyJrSVQ==} + + '@vue/server-renderer@3.5.32': + resolution: {integrity: sha512-IOjm2+JQwRFS7W28HNuJeXQle9KdZbODFY7hFGVtnnghF51ta20EWAZJHX+zLGtsHhaU6uC9BGPV52KVpYryMQ==} + peerDependencies: + vue: 3.5.32 + + '@vue/shared@3.5.32': + resolution: {integrity: 
sha512-ksNyrmRQzWJJ8n3cRDuSF7zNNontuJg1YHnmWRJd2AMu8Ij2bqwiiri2lH5rHtYPZjj4STkNcgcmiQqlOjiYGg==} + + '@vue/tsconfig@0.5.1': + resolution: {integrity: sha512-VcZK7MvpjuTPx2w6blwnwZAu5/LgBUtejFOi3pPGQFXQN5Ela03FUtd2Qtg4yWGGissVL0dr6Ro1LfOFh+PCuQ==} + + '@vueuse/core@10.11.1': + resolution: {integrity: sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==} + + '@vueuse/core@12.0.0': + resolution: {integrity: sha512-C12RukhXiJCbx4MGhjmd/gH52TjJsc3G0E0kQj/kb19H3Nt6n1CA4DRWuTdWWcaFRdlTe0npWDS942mvacvNBw==} + + '@vueuse/metadata@10.11.1': + resolution: {integrity: sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==} + + '@vueuse/metadata@12.0.0': + resolution: {integrity: sha512-Yzimd1D3sjxTDOlF05HekU5aSGdKjxhuhRFHA7gDWLn57PRbBIh+SF5NmjhJ0WRgF3my7T8LBucyxdFJjIfRJQ==} + + '@vueuse/shared@10.11.1': + resolution: {integrity: sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==} + + '@vueuse/shared@12.0.0': + resolution: {integrity: sha512-3i6qtcq2PIio5i/vVYidkkcgvmTjCqrf26u+Fd4LhnbBmIT6FN8y6q/GJERp8lfcB9zVEfjdV0Br0443qZuJpw==} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.16.0: + resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} + engines: {node: '>=0.4.0'} + hasBin: true + + ajv@6.14.0: + resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: 
sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + async-validator@4.2.5: + resolution: {integrity: sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@1.14.0: + resolution: {integrity: sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + birpc@2.9.0: + resolution: {integrity: sha512-KrayHS5pBi69Xi9JmvoqrIgYGDkD6mcSe/i6YKi3w5kekCLzrX4+nawcXqrj2tIp50Kw/mT/s3p+GVK0A0sKxw==} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + brace-expansion@1.1.13: + resolution: {integrity: sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==} + + brace-expansion@2.0.3: + resolution: {integrity: sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: 
{integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + computeds@0.0.1: + resolution: {integrity: sha512-7CEBgcMjVmitjYo5q8JTJVra6X5mQ20uTThdK+0kR7UEaDrAWEQcRiBtWJzga4eRpP6afNwwLsX2SET2JhVB1Q==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + copy-anything@4.0.5: + resolution: {integrity: sha512-7Vv6asjS4gMOuILabD3l739tsaxFQmC+a7pLZm02zyvs8p977bL3zEgq3yDk5rn9B0PbYgIv++jmHcuUab4RhA==} + engines: {node: '>=18'} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@3.2.3: + resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + + d3-color@3.1.0: + resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} + engines: {node: '>=12'} + + d3-dispatch@3.0.1: + resolution: {integrity: 
sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + + d3-drag@3.0.0: + resolution: {integrity: sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} + engines: {node: '>=12'} + + d3-ease@3.0.1: + resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} + engines: {node: '>=12'} + + d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + + d3-selection@3.0.0: + resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} + engines: {node: '>=12'} + + d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + + d3-transition@3.0.1: + resolution: {integrity: sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==} + engines: {node: '>=12'} + peerDependencies: + d3-selection: 2 - 3 + + d3-zoom@3.0.0: + resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} + engines: {node: '>=12'} + + dayjs@1.11.20: + resolution: {integrity: sha512-YbwwqR/uYpeoP4pu043q+LTDLFBLApUP6VxRihdfNTqu4ubqMlGDLd6ErXhEgsyvY0K6nCs7nggYumAN+9uEuQ==} + + de-indent@1.0.2: + resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: 
sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + element-plus@2.13.6: + resolution: {integrity: sha512-XHgwXr8Fjz6i+6BaqFhAbae/dJbG7bBAAlHrY3pWL7dpj+JcqcOyKYt4Oy5KP86FQwS1k4uIZDjCx2FyUR5lDg==} + peerDependencies: + vue: ^3.3.0 + + engine.io-client@6.6.4: + resolution: {integrity: sha512-+kjUJnZGwzewFDw951CDWcwj35vMNf2fcj7xQWOctq1F2i1jkDdVvdFG9kM/BEChymCH36KgjnW0NsL58JYRxw==} + + engine.io-parser@5.2.3: + resolution: {integrity: sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==} + engines: {node: '>=10.0.0'} + + entities@7.0.1: + resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} + engines: {node: '>=0.12'} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 
0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} + engines: {node: '>=12'} + hasBin: true + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-config-prettier@9.1.2: + resolution: {integrity: sha512-iI1f+D2ViGn+uvv5HuHVUamg8ll4tN+JRHGc6IJi4TP9Kl976C57fzPXgseXNs8v0iA8aSJpHsTWjDb9QJamGQ==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + + eslint-plugin-prettier@5.5.5: + resolution: {integrity: sha512-hscXkbqUZ2sPithAuLm5MXL+Wph+U7wHngPBv9OMWwlP8iaflyxpjTYZkmdgB4/vPIhemRlBEoLrH7UC1n7aUw==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + '@types/eslint': '>=8.0.0' + eslint: '>=8.0.0' + eslint-config-prettier: '>= 7.0.0 <10.0.0 || >=10.1.0' + prettier: '>=3.0.0' + peerDependenciesMeta: + '@types/eslint': + optional: true + eslint-config-prettier: + optional: true + + eslint-plugin-vue@9.33.0: + resolution: {integrity: sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0 + + eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.57.1: + resolution: {integrity: 
sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. + hasBin: true + + espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esquery@1.7.0: + resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-diff@1.3.0: + resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} + + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: 
sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.20.1: + resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==} + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.4.2: + resolution: {integrity: sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==} + + follow-redirects@1.15.11: + resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: 
sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@13.24.0: + resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} + engines: {node: '>=8'} + + globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + 
has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + he@1.2.0: + resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} + hasBin: true + + hookable@5.5.3: + resolution: {integrity: sha512-Yc+BQe8SvoXH1643Qez1zqLRmbA5rCL+sSmk6TVos0LWVfNIB7PGncdlId77WzLGSIB5KaWgTaNTs2lNVEI6VQ==} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
+ + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + is-what@5.5.0: + resolution: {integrity: sha512-oG7cgbmg5kLYae2N5IVd3jm2s+vldjxJzK1pcu9LfpGuQ93MQSzo0okvRna+7y5ifrD+20FE8FvjusyGaz14fw==} + engines: {node: '>=18'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: 
sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash-es@4.18.1: + resolution: {integrity: sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==} + + lodash-unified@1.0.3: + resolution: {integrity: sha512-WK9qSozxXOD7ZJQlpSqOT+om2ZfcT4yO+03FuzAHD0wF6S0l0090LRPDx3vhTTLZ8cFKpBn+IOcVXK6qOcIlfQ==} + peerDependencies: + '@types/lodash-es': '*' + lodash: '*' + lodash-es: '*' + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lodash@4.18.1: + resolution: {integrity: sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + memoize-one@6.0.0: + resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: 
sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + minimatch@3.1.5: + resolution: {integrity: sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==} + + minimatch@9.0.3: + resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} + engines: {node: '>=16 || 14 >=14.17'} + + minimatch@9.0.9: + resolution: {integrity: sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==} + engines: {node: '>=16 || 14 >=14.17'} + + mitt@3.0.1: + resolution: {integrity: sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==} + + monaco-editor@0.44.0: + resolution: {integrity: sha512-5SmjNStN6bSuSE5WPT2ZV+iYn1/yI9sd4Igtk23ChvqB7kDk9lZbB9F5frsuvpB+2njdIeGGFf2G4gbE6rCC9Q==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + muggle-string@0.3.1: + resolution: {integrity: sha512-ckmWDJjphvd/FvZawgygcUeQCxzvohjFO5RxTjj4eq8kw359gFF3E1brjfI+viLMxss5JrHTDRHZvu2/tuy0Qg==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + normalize-wheel-es@1.2.0: + resolution: {integrity: sha512-Wj7+EJQ8mSuXr2iWfnujrimU35R2W4FAErEyTmJoJ7ucwTn2hOUSsRehMb5RSYkxXGTM7Y9QpvPmp++w5ftoJw==} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + once@1.4.0: + resolution: {integrity: 
sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + perfect-debounce@1.0.0: + resolution: {integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.2: + resolution: {integrity: 
sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==} + engines: {node: '>=8.6'} + + pinia@2.3.1: + resolution: {integrity: sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==} + peerDependencies: + typescript: '>=4.4.4' + vue: ^2.7.0 || ^3.5.11 + peerDependenciesMeta: + typescript: + optional: true + + postcss-selector-parser@6.1.2: + resolution: {integrity: sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==} + engines: {node: '>=4'} + + postcss@8.5.8: + resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier-linter-helpers@1.0.1: + resolution: {integrity: sha512-SxToR7P8Y2lWmv/kTzVLC1t/GDI2WGjMwNhLLE9qtH8Q13C+aEmuRlzDst4Up4s0Wc8sF2M+J57iB3cMLqftfg==} + engines: {node: '>=6.0.0'} + + prettier@3.8.1: + resolution: {integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==} + engines: {node: '>=14'} + hasBin: true + + property-expr@2.0.6: + resolution: {integrity: sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==} + + proxy-from-env@2.1.0: + resolution: {integrity: sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA==} + engines: {node: '>=10'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + resolve-from@4.0.0: + resolution: {integrity: 
sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + reusify@1.1.0: + resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rfdc@1.4.1: + resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==} + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + rollup@4.60.1: + resolution: {integrity: sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + socket.io-client@4.8.3: + resolution: {integrity: sha512-uP0bpjWrjQmUt5DTHq9RuoCBdFJF10cdX9X+a368j/Ft0wmaVgxlrjvK3kjvgCODOMMOz9lcaRzxmso0bTWZ/g==} + engines: {node: '>=10.0.0'} + + socket.io-parser@4.2.6: + resolution: {integrity: 
sha512-asJqbVBDsBCJx0pTqw3WfesSY0iRX+2xzWEWzrpcH7L6fLzrhyF8WPI8UaeM4YCuDfpwA/cgsdugMsmtz8EJeg==} + engines: {node: '>=10.0.0'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + speakingurl@14.0.1: + resolution: {integrity: sha512-1POYv7uv2gXoyGFpBCmpDVSNV74IfsWlDW216UPjbWufNf+bSU6GdbDsxdcxtfwb4xlI3yxzOTKClUosxARYrQ==} + engines: {node: '>=0.10.0'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + superjson@2.2.6: + resolution: {integrity: sha512-H+ue8Zo4vJmV2nRjpx86P35lzwDT3nItnIsocgumgr0hHMQ+ZGq5vrERg9kJBo5AWGmxZDhzDo+WVIJqkB0cGA==} + engines: {node: '>=16'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + synckit@0.11.12: + resolution: {integrity: sha512-Bh7QjT8/SuKUIfObSXNHNSK6WHo6J1tHCqJsuaFDP7gP0fkzSfTxI8y85JrppZ0h8l0maIgc2tfuZQ6/t3GtnQ==} + engines: {node: ^14.18.0 || >=16.0.0} + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + tiny-case@1.0.3: + resolution: {integrity: sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q==} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + toposort@2.0.2: + resolution: {integrity: sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==} + + ts-api-utils@1.4.3: + 
resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==} + engines: {node: '>=16'} + peerDependencies: + typescript: '>=4.2.0' + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + type-fest@2.19.0: + resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} + engines: {node: '>=12.20'} + + type-fest@4.41.0: + resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==} + engines: {node: '>=16'} + + typescript@5.3.3: + resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vee-validate@4.15.1: + resolution: {integrity: sha512-DkFsiTwEKau8VIxyZBGdO6tOudD+QoUBPuHj3e6QFqmbfCRj1ArmYWue9lEp6jLSWBIw4XPlDLjFIZNLdRAMSg==} + peerDependencies: + vue: ^3.4.26 + + vite@5.4.21: + resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || >=20.0.0 + less: '*' + lightningcss: ^1.21.0 + sass: '*' + 
sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + + vue-component-type-helpers@3.2.6: + resolution: {integrity: sha512-O02tnvIfOQVmnvoWwuSydwRoHjZVt8UEBR+2p4rT35p8GAy5VTlWP8o5qXfJR/GWCN0nVZoYWsVUvx2jwgdBmQ==} + + vue-demi@0.14.10: + resolution: {integrity: sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==} + engines: {node: '>=12'} + hasBin: true + peerDependencies: + '@vue/composition-api': ^1.0.0-rc.1 + vue: ^3.0.0-0 || ^2.6.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + + vue-eslint-parser@9.4.3: + resolution: {integrity: sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: '>=6.0.0' + + vue-router@4.6.4: + resolution: {integrity: sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==} + peerDependencies: + vue: ^3.5.0 + + vue-template-compiler@2.7.16: + resolution: {integrity: sha512-AYbUWAJHLGGQM7+cNTELw+KsOG9nl2CnSv467WobS5Cv9uk3wFcnr1Etsz2sEIHEZvw1U+o9mRlEO6QbZvUPGQ==} + + vue-tsc@1.8.27: + resolution: {integrity: sha512-WesKCAZCRAbmmhuGl3+VrdWItEvfoFIPXOvUJkjULi+x+6G/Dy69yO3TBRJDr9eUlmsNAwVmxsNZxvHKzbkKdg==} + hasBin: true + peerDependencies: + typescript: '*' + + vue@3.5.32: + resolution: {integrity: sha512-vM4z4Q9tTafVfMAK7IVzmxg34rSzTFMyIe0UUEijUCkn9+23lj0WRfA83dg7eQZIUlgOSGrkViIaCfqSAUXsMw==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + 
+ word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + ws@8.18.3: + resolution: {integrity: sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + xml-name-validator@4.0.0: + resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} + engines: {node: '>=12'} + + xmlhttprequest-ssl@2.1.2: + resolution: {integrity: sha512-TEU+nJVUUnA4CYJFLvK5X9AOeH4KvDvhIfm0vV1GaQRtchnG0hgK5p8hw/xjv8cunWYCsiPCSDzObPyhEwq3KQ==} + engines: {node: '>=0.4.0'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + yup@1.7.1: + resolution: {integrity: sha512-GKHFX2nXul2/4Dtfxhozv701jLQHdf6J34YDh2cEkpqoo8le5Mg6/LrdseVLrFarmFygZTlfIhHx/QKfb/QWXw==} + +snapshots: + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.2': + dependencies: + '@babel/types': 7.29.0 + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@ctrl/tinycolor@4.2.0': {} + + '@element-plus/icons-vue@2.3.2(vue@3.5.32(typescript@5.3.3))': + dependencies: + vue: 3.5.32(typescript@5.3.3) + + '@esbuild/aix-ppc64@0.21.5': + optional: true + + '@esbuild/android-arm64@0.21.5': + optional: true + + '@esbuild/android-arm@0.21.5': + optional: true + + '@esbuild/android-x64@0.21.5': + optional: true + + 
'@esbuild/darwin-arm64@0.21.5': + optional: true + + '@esbuild/darwin-x64@0.21.5': + optional: true + + '@esbuild/freebsd-arm64@0.21.5': + optional: true + + '@esbuild/freebsd-x64@0.21.5': + optional: true + + '@esbuild/linux-arm64@0.21.5': + optional: true + + '@esbuild/linux-arm@0.21.5': + optional: true + + '@esbuild/linux-ia32@0.21.5': + optional: true + + '@esbuild/linux-loong64@0.21.5': + optional: true + + '@esbuild/linux-mips64el@0.21.5': + optional: true + + '@esbuild/linux-ppc64@0.21.5': + optional: true + + '@esbuild/linux-riscv64@0.21.5': + optional: true + + '@esbuild/linux-s390x@0.21.5': + optional: true + + '@esbuild/linux-x64@0.21.5': + optional: true + + '@esbuild/netbsd-x64@0.21.5': + optional: true + + '@esbuild/openbsd-x64@0.21.5': + optional: true + + '@esbuild/sunos-x64@0.21.5': + optional: true + + '@esbuild/win32-arm64@0.21.5': + optional: true + + '@esbuild/win32-ia32@0.21.5': + optional: true + + '@esbuild/win32-x64@0.21.5': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@8.57.1)': + dependencies: + eslint: 8.57.1 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/eslintrc@2.1.4': + dependencies: + ajv: 6.14.0 + debug: 4.4.3 + espree: 9.6.1 + globals: 13.24.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.5 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@8.57.1': {} + + '@floating-ui/core@1.7.5': + dependencies: + '@floating-ui/utils': 0.2.11 + + '@floating-ui/dom@1.7.6': + dependencies: + '@floating-ui/core': 1.7.5 + '@floating-ui/utils': 0.2.11 + + '@floating-ui/utils@0.2.11': {} + + '@humanwhocodes/config-array@0.13.0': + dependencies: + '@humanwhocodes/object-schema': 2.0.3 + debug: 4.4.3 + minimatch: 3.1.5 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@2.0.3': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + 
'@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.20.1 + + '@pkgr/core@0.2.9': {} + + '@rollup/rollup-android-arm-eabi@4.60.1': + optional: true + + '@rollup/rollup-android-arm64@4.60.1': + optional: true + + '@rollup/rollup-darwin-arm64@4.60.1': + optional: true + + '@rollup/rollup-darwin-x64@4.60.1': + optional: true + + '@rollup/rollup-freebsd-arm64@4.60.1': + optional: true + + '@rollup/rollup-freebsd-x64@4.60.1': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.60.1': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.60.1': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.60.1': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.60.1': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.60.1': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.60.1': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.60.1': + optional: true + + '@rollup/rollup-linux-x64-musl@4.60.1': + optional: true + + '@rollup/rollup-openbsd-x64@4.60.1': + optional: true + + '@rollup/rollup-openharmony-arm64@4.60.1': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.60.1': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.60.1': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.60.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.60.1': + optional: true + + '@socket.io/component-emitter@3.1.2': {} + + '@sxzz/popperjs-es@2.11.8': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@types/lodash-es@4.17.12': + dependencies: + 
'@types/lodash': 4.17.24 + + '@types/lodash@4.17.24': {} + + '@types/node@20.19.39': + dependencies: + undici-types: 6.21.0 + + '@types/semver@7.7.1': {} + + '@types/web-bluetooth@0.0.20': {} + + '@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.3.3))(eslint@8.57.1)(typescript@5.3.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.3.3) + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/type-utils': 6.21.0(eslint@8.57.1)(typescript@5.3.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.3 + eslint: 8.57.1 + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + semver: 7.7.4 + ts-api-utils: 1.4.3(typescript@5.3.3) + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.3.3)': + dependencies: + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.3 + eslint: 8.57.1 + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@6.21.0': + dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + + '@typescript-eslint/type-utils@6.21.0(eslint@8.57.1)(typescript@5.3.3)': + dependencies: + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.3.3) + debug: 4.4.3 + eslint: 8.57.1 + ts-api-utils: 1.4.3(typescript@5.3.3) + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@6.21.0': {} + + '@typescript-eslint/typescript-estree@6.21.0(typescript@5.3.3)': + 
dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.4.3 + globby: 11.1.0 + is-glob: 4.0.3 + minimatch: 9.0.3 + semver: 7.7.4 + ts-api-utils: 1.4.3(typescript@5.3.3) + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@6.21.0(eslint@8.57.1)(typescript@5.3.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + '@types/json-schema': 7.0.15 + '@types/semver': 7.7.1 + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + eslint: 8.57.1 + semver: 7.7.4 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@6.21.0': + dependencies: + '@typescript-eslint/types': 6.21.0 + eslint-visitor-keys: 3.4.3 + + '@ungap/structured-clone@1.3.0': {} + + '@vitejs/plugin-vue@4.6.2(vite@5.4.21(@types/node@20.19.39))(vue@3.5.32(typescript@5.3.3))': + dependencies: + vite: 5.4.21(@types/node@20.19.39) + vue: 3.5.32(typescript@5.3.3) + + '@volar/language-core@1.11.1': + dependencies: + '@volar/source-map': 1.11.1 + + '@volar/source-map@1.11.1': + dependencies: + muggle-string: 0.3.1 + + '@volar/typescript@1.11.1': + dependencies: + '@volar/language-core': 1.11.1 + path-browserify: 1.0.1 + + '@vue-flow/background@1.3.2(@vue-flow/core@1.48.2(vue@3.5.32(typescript@5.3.3)))(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@vue-flow/core': 1.48.2(vue@3.5.32(typescript@5.3.3)) + vue: 3.5.32(typescript@5.3.3) + + '@vue-flow/controls@1.1.3(@vue-flow/core@1.48.2(vue@3.5.32(typescript@5.3.3)))(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@vue-flow/core': 1.48.2(vue@3.5.32(typescript@5.3.3)) + vue: 3.5.32(typescript@5.3.3) + + '@vue-flow/core@1.48.2(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@vueuse/core': 10.11.1(vue@3.5.32(typescript@5.3.3)) + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 
3.0.0 + d3-zoom: 3.0.0 + vue: 3.5.32(typescript@5.3.3) + transitivePeerDependencies: + - '@vue/composition-api' + + '@vue-flow/minimap@1.5.4(@vue-flow/core@1.48.2(vue@3.5.32(typescript@5.3.3)))(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@vue-flow/core': 1.48.2(vue@3.5.32(typescript@5.3.3)) + d3-selection: 3.0.0 + d3-zoom: 3.0.0 + vue: 3.5.32(typescript@5.3.3) + + '@vue/compiler-core@3.5.32': + dependencies: + '@babel/parser': 7.29.2 + '@vue/shared': 3.5.32 + entities: 7.0.1 + estree-walker: 2.0.2 + source-map-js: 1.2.1 + + '@vue/compiler-dom@3.5.32': + dependencies: + '@vue/compiler-core': 3.5.32 + '@vue/shared': 3.5.32 + + '@vue/compiler-sfc@3.5.32': + dependencies: + '@babel/parser': 7.29.2 + '@vue/compiler-core': 3.5.32 + '@vue/compiler-dom': 3.5.32 + '@vue/compiler-ssr': 3.5.32 + '@vue/shared': 3.5.32 + estree-walker: 2.0.2 + magic-string: 0.30.21 + postcss: 8.5.8 + source-map-js: 1.2.1 + + '@vue/compiler-ssr@3.5.32': + dependencies: + '@vue/compiler-dom': 3.5.32 + '@vue/shared': 3.5.32 + + '@vue/devtools-api@6.6.4': {} + + '@vue/devtools-api@7.7.9': + dependencies: + '@vue/devtools-kit': 7.7.9 + + '@vue/devtools-kit@7.7.9': + dependencies: + '@vue/devtools-shared': 7.7.9 + birpc: 2.9.0 + hookable: 5.5.3 + mitt: 3.0.1 + perfect-debounce: 1.0.0 + speakingurl: 14.0.1 + superjson: 2.2.6 + + '@vue/devtools-shared@7.7.9': + dependencies: + rfdc: 1.4.1 + + '@vue/eslint-config-prettier@9.0.0(eslint@8.57.1)(prettier@3.8.1)': + dependencies: + eslint: 8.57.1 + eslint-config-prettier: 9.1.2(eslint@8.57.1) + eslint-plugin-prettier: 5.5.5(eslint-config-prettier@9.1.2(eslint@8.57.1))(eslint@8.57.1)(prettier@3.8.1) + prettier: 3.8.1 + transitivePeerDependencies: + - '@types/eslint' + + '@vue/eslint-config-typescript@12.0.0(eslint-plugin-vue@9.33.0(eslint@8.57.1))(eslint@8.57.1)(typescript@5.3.3)': + dependencies: + '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.3.3))(eslint@8.57.1)(typescript@5.3.3) + 
'@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.3.3) + eslint: 8.57.1 + eslint-plugin-vue: 9.33.0(eslint@8.57.1) + vue-eslint-parser: 9.4.3(eslint@8.57.1) + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + + '@vue/language-core@1.8.27(typescript@5.3.3)': + dependencies: + '@volar/language-core': 1.11.1 + '@volar/source-map': 1.11.1 + '@vue/compiler-dom': 3.5.32 + '@vue/shared': 3.5.32 + computeds: 0.0.1 + minimatch: 9.0.9 + muggle-string: 0.3.1 + path-browserify: 1.0.1 + vue-template-compiler: 2.7.16 + optionalDependencies: + typescript: 5.3.3 + + '@vue/reactivity@3.5.32': + dependencies: + '@vue/shared': 3.5.32 + + '@vue/runtime-core@3.5.32': + dependencies: + '@vue/reactivity': 3.5.32 + '@vue/shared': 3.5.32 + + '@vue/runtime-dom@3.5.32': + dependencies: + '@vue/reactivity': 3.5.32 + '@vue/runtime-core': 3.5.32 + '@vue/shared': 3.5.32 + csstype: 3.2.3 + + '@vue/server-renderer@3.5.32(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@vue/compiler-ssr': 3.5.32 + '@vue/shared': 3.5.32 + vue: 3.5.32(typescript@5.3.3) + + '@vue/shared@3.5.32': {} + + '@vue/tsconfig@0.5.1': {} + + '@vueuse/core@10.11.1(vue@3.5.32(typescript@5.3.3))': + dependencies: + '@types/web-bluetooth': 0.0.20 + '@vueuse/metadata': 10.11.1 + '@vueuse/shared': 10.11.1(vue@3.5.32(typescript@5.3.3)) + vue-demi: 0.14.10(vue@3.5.32(typescript@5.3.3)) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + '@vueuse/core@12.0.0(typescript@5.3.3)': + dependencies: + '@types/web-bluetooth': 0.0.20 + '@vueuse/metadata': 12.0.0 + '@vueuse/shared': 12.0.0(typescript@5.3.3) + vue: 3.5.32(typescript@5.3.3) + transitivePeerDependencies: + - typescript + + '@vueuse/metadata@10.11.1': {} + + '@vueuse/metadata@12.0.0': {} + + '@vueuse/shared@10.11.1(vue@3.5.32(typescript@5.3.3))': + dependencies: + vue-demi: 0.14.10(vue@3.5.32(typescript@5.3.3)) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + 
'@vueuse/shared@12.0.0(typescript@5.3.3)': + dependencies: + vue: 3.5.32(typescript@5.3.3) + transitivePeerDependencies: + - typescript + + acorn-jsx@5.3.2(acorn@8.16.0): + dependencies: + acorn: 8.16.0 + + acorn@8.16.0: {} + + ajv@6.14.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + array-union@2.1.0: {} + + async-validator@4.2.5: {} + + asynckit@0.4.0: {} + + axios@1.14.0: + dependencies: + follow-redirects: 1.15.11 + form-data: 4.0.5 + proxy-from-env: 2.1.0 + transitivePeerDependencies: + - debug + + balanced-match@1.0.2: {} + + birpc@2.9.0: {} + + boolbase@1.0.0: {} + + brace-expansion@1.1.13: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.3: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + callsites@3.1.0: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + computeds@0.0.1: {} + + concat-map@0.0.1: {} + + copy-anything@4.0.5: + dependencies: + is-what: 5.5.0 + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + csstype@3.2.3: {} + + d3-color@3.1.0: {} + + d3-dispatch@3.0.1: {} + + d3-drag@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-selection: 3.0.0 + + d3-ease@3.0.1: {} + + d3-interpolate@3.0.1: + dependencies: + d3-color: 3.1.0 + + d3-selection@3.0.0: {} + + d3-timer@3.0.1: {} + + d3-transition@3.0.1(d3-selection@3.0.0): + dependencies: + d3-color: 3.1.0 + d3-dispatch: 3.0.1 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + 
d3-timer: 3.0.1 + + d3-zoom@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + dayjs@1.11.20: {} + + de-indent@1.0.2: {} + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + delayed-stream@1.0.0: {} + + dir-glob@3.0.1: + dependencies: + path-type: 4.0.0 + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + element-plus@2.13.6(typescript@5.3.3)(vue@3.5.32(typescript@5.3.3)): + dependencies: + '@ctrl/tinycolor': 4.2.0 + '@element-plus/icons-vue': 2.3.2(vue@3.5.32(typescript@5.3.3)) + '@floating-ui/dom': 1.7.6 + '@popperjs/core': '@sxzz/popperjs-es@2.11.8' + '@types/lodash': 4.17.24 + '@types/lodash-es': 4.17.12 + '@vueuse/core': 12.0.0(typescript@5.3.3) + async-validator: 4.2.5 + dayjs: 1.11.20 + lodash: 4.18.1 + lodash-es: 4.18.1 + lodash-unified: 1.0.3(@types/lodash-es@4.17.12)(lodash-es@4.18.1)(lodash@4.18.1) + memoize-one: 6.0.0 + normalize-wheel-es: 1.2.0 + vue: 3.5.32(typescript@5.3.3) + vue-component-type-helpers: 3.2.6 + transitivePeerDependencies: + - typescript + + engine.io-client@6.6.4: + dependencies: + '@socket.io/component-emitter': 3.1.2 + debug: 4.4.3 + engine.io-parser: 5.2.3 + ws: 8.18.3 + xmlhttprequest-ssl: 2.1.2 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + engine.io-parser@5.2.3: {} + + entities@7.0.1: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + esbuild@0.21.5: + optionalDependencies: + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 
0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 + + escape-string-regexp@4.0.0: {} + + eslint-config-prettier@9.1.2(eslint@8.57.1): + dependencies: + eslint: 8.57.1 + + eslint-plugin-prettier@5.5.5(eslint-config-prettier@9.1.2(eslint@8.57.1))(eslint@8.57.1)(prettier@3.8.1): + dependencies: + eslint: 8.57.1 + prettier: 3.8.1 + prettier-linter-helpers: 1.0.1 + synckit: 0.11.12 + optionalDependencies: + eslint-config-prettier: 9.1.2(eslint@8.57.1) + + eslint-plugin-vue@9.33.0(eslint@8.57.1): + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + eslint: 8.57.1 + globals: 13.24.0 + natural-compare: 1.4.0 + nth-check: 2.1.1 + postcss-selector-parser: 6.1.2 + semver: 7.7.4 + vue-eslint-parser: 9.4.3(eslint@8.57.1) + xml-name-validator: 4.0.0 + transitivePeerDependencies: + - supports-color + + eslint-scope@7.2.2: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint@8.57.1: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + '@eslint-community/regexpp': 4.12.2 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.57.1 + '@humanwhocodes/config-array': 0.13.0 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.3.0 + ajv: 6.14.0 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.3 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.7.0 + esutils: 2.0.3 + 
fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.24.0 + graphemer: 1.4.0 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.1 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.5 + natural-compare: 1.4.0 + optionator: 0.9.4 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.6.1: + dependencies: + acorn: 8.16.0 + acorn-jsx: 5.3.2(acorn@8.16.0) + eslint-visitor-keys: 3.4.3 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + estree-walker@2.0.2: {} + + esutils@2.0.3: {} + + fast-deep-equal@3.1.3: {} + + fast-diff@1.3.0: {} + + fast-glob@3.3.3: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.20.1: + dependencies: + reusify: 1.1.0 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.2.0 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.2.0: + dependencies: + flatted: 3.4.2 + keyv: 4.5.4 + rimraf: 3.0.2 + + flatted@3.4.2: {} + + follow-redirects@1.15.11: {} + + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + 
es-object-atoms: 1.1.1 + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.5 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@13.24.0: + dependencies: + type-fest: 0.20.2 + + globby@11.1.0: + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.3 + ignore: 5.3.2 + merge2: 1.4.1 + slash: 3.0.0 + + gopd@1.2.0: {} + + graphemer@1.4.0: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + he@1.2.0: {} + + hookable@5.5.3: {} + + ignore@5.3.2: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + is-what@5.5.0: {} + + isexe@2.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-buffer@3.0.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash-es@4.18.1: {} + + lodash-unified@1.0.3(@types/lodash-es@4.17.12)(lodash-es@4.18.1)(lodash@4.18.1): + dependencies: + '@types/lodash-es': 4.17.12 + lodash: 4.18.1 + lodash-es: 4.18.1 + + lodash.merge@4.6.2: {} + + lodash@4.18.1: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + math-intrinsics@1.1.0: {} + + memoize-one@6.0.0: {} + + merge2@1.4.1: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.2 + + mime-db@1.52.0: {} + + mime-types@2.1.35: + 
dependencies: + mime-db: 1.52.0 + + minimatch@3.1.5: + dependencies: + brace-expansion: 1.1.13 + + minimatch@9.0.3: + dependencies: + brace-expansion: 2.0.3 + + minimatch@9.0.9: + dependencies: + brace-expansion: 2.0.3 + + mitt@3.0.1: {} + + monaco-editor@0.44.0: {} + + ms@2.1.3: {} + + muggle-string@0.3.1: {} + + nanoid@3.3.11: {} + + natural-compare@1.4.0: {} + + normalize-wheel-es@1.2.0: {} + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-browserify@1.0.1: {} + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-type@4.0.0: {} + + perfect-debounce@1.0.0: {} + + picocolors@1.1.1: {} + + picomatch@2.3.2: {} + + pinia@2.3.1(typescript@5.3.3)(vue@3.5.32(typescript@5.3.3)): + dependencies: + '@vue/devtools-api': 6.6.4 + vue: 3.5.32(typescript@5.3.3) + vue-demi: 0.14.10(vue@3.5.32(typescript@5.3.3)) + optionalDependencies: + typescript: 5.3.3 + transitivePeerDependencies: + - '@vue/composition-api' + + postcss-selector-parser@6.1.2: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss@8.5.8: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prelude-ls@1.2.1: {} + + prettier-linter-helpers@1.0.1: + dependencies: + fast-diff: 1.3.0 + + prettier@3.8.1: {} + + property-expr@2.0.6: {} + + proxy-from-env@2.1.0: {} + + punycode@2.3.1: {} + + queue-microtask@1.2.3: {} + + resolve-from@4.0.0: {} + + reusify@1.1.0: {} + + rfdc@1.4.1: {} + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + rollup@4.60.1: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.60.1 + 
'@rollup/rollup-android-arm64': 4.60.1 + '@rollup/rollup-darwin-arm64': 4.60.1 + '@rollup/rollup-darwin-x64': 4.60.1 + '@rollup/rollup-freebsd-arm64': 4.60.1 + '@rollup/rollup-freebsd-x64': 4.60.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.60.1 + '@rollup/rollup-linux-arm-musleabihf': 4.60.1 + '@rollup/rollup-linux-arm64-gnu': 4.60.1 + '@rollup/rollup-linux-arm64-musl': 4.60.1 + '@rollup/rollup-linux-loong64-gnu': 4.60.1 + '@rollup/rollup-linux-loong64-musl': 4.60.1 + '@rollup/rollup-linux-ppc64-gnu': 4.60.1 + '@rollup/rollup-linux-ppc64-musl': 4.60.1 + '@rollup/rollup-linux-riscv64-gnu': 4.60.1 + '@rollup/rollup-linux-riscv64-musl': 4.60.1 + '@rollup/rollup-linux-s390x-gnu': 4.60.1 + '@rollup/rollup-linux-x64-gnu': 4.60.1 + '@rollup/rollup-linux-x64-musl': 4.60.1 + '@rollup/rollup-openbsd-x64': 4.60.1 + '@rollup/rollup-openharmony-arm64': 4.60.1 + '@rollup/rollup-win32-arm64-msvc': 4.60.1 + '@rollup/rollup-win32-ia32-msvc': 4.60.1 + '@rollup/rollup-win32-x64-gnu': 4.60.1 + '@rollup/rollup-win32-x64-msvc': 4.60.1 + fsevents: 2.3.3 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + semver@7.7.4: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + slash@3.0.0: {} + + socket.io-client@4.8.3: + dependencies: + '@socket.io/component-emitter': 3.1.2 + debug: 4.4.3 + engine.io-client: 6.6.4 + socket.io-parser: 4.2.6 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + socket.io-parser@4.2.6: + dependencies: + '@socket.io/component-emitter': 3.1.2 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + source-map-js@1.2.1: {} + + speakingurl@14.0.1: {} + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-json-comments@3.1.1: {} + + superjson@2.2.6: + dependencies: + copy-anything: 4.0.5 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + synckit@0.11.12: + dependencies: + '@pkgr/core': 0.2.9 + + text-table@0.2.0: {} + + tiny-case@1.0.3: 
{} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + toposort@2.0.2: {} + + ts-api-utils@1.4.3(typescript@5.3.3): + dependencies: + typescript: 5.3.3 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-fest@0.20.2: {} + + type-fest@2.19.0: {} + + type-fest@4.41.0: {} + + typescript@5.3.3: {} + + undici-types@6.21.0: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + util-deprecate@1.0.2: {} + + vee-validate@4.15.1(vue@3.5.32(typescript@5.3.3)): + dependencies: + '@vue/devtools-api': 7.7.9 + type-fest: 4.41.0 + vue: 3.5.32(typescript@5.3.3) + + vite@5.4.21(@types/node@20.19.39): + dependencies: + esbuild: 0.21.5 + postcss: 8.5.8 + rollup: 4.60.1 + optionalDependencies: + '@types/node': 20.19.39 + fsevents: 2.3.3 + + vue-component-type-helpers@3.2.6: {} + + vue-demi@0.14.10(vue@3.5.32(typescript@5.3.3)): + dependencies: + vue: 3.5.32(typescript@5.3.3) + + vue-eslint-parser@9.4.3(eslint@8.57.1): + dependencies: + debug: 4.4.3 + eslint: 8.57.1 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.7.0 + lodash: 4.18.1 + semver: 7.7.4 + transitivePeerDependencies: + - supports-color + + vue-router@4.6.4(vue@3.5.32(typescript@5.3.3)): + dependencies: + '@vue/devtools-api': 6.6.4 + vue: 3.5.32(typescript@5.3.3) + + vue-template-compiler@2.7.16: + dependencies: + de-indent: 1.0.2 + he: 1.2.0 + + vue-tsc@1.8.27(typescript@5.3.3): + dependencies: + '@volar/typescript': 1.11.1 + '@vue/language-core': 1.8.27(typescript@5.3.3) + semver: 7.7.4 + typescript: 5.3.3 + + vue@3.5.32(typescript@5.3.3): + dependencies: + '@vue/compiler-dom': 3.5.32 + '@vue/compiler-sfc': 3.5.32 + '@vue/runtime-dom': 3.5.32 + '@vue/server-renderer': 3.5.32(vue@3.5.32(typescript@5.3.3)) + '@vue/shared': 3.5.32 + optionalDependencies: + typescript: 5.3.3 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + wrappy@1.0.2: {} + + ws@8.18.3: {} + + xml-name-validator@4.0.0: {} + + xmlhttprequest-ssl@2.1.2: {} + + 
yocto-queue@0.1.0: {} + + yup@1.7.1: + dependencies: + property-expr: 2.0.6 + tiny-case: 1.0.3 + toposort: 2.0.2 + type-fest: 2.19.0 diff --git a/frontend/src/components/AgentChatPreview.vue b/frontend/src/components/AgentChatPreview.vue index 8d00fb2..918e219 100644 --- a/frontend/src/components/AgentChatPreview.vue +++ b/frontend/src/components/AgentChatPreview.vue @@ -386,11 +386,44 @@ const scrollToBottom = () => { }) } +/** + * 知你等工作流约定「自然语言 + 末尾单行 JSON(intent/reply/user_profile)」。 + * 聊天区若原样展示会重复;展示前去掉末尾 JSON 行(可连续多行);若整段只有 JSON 则用 reply 作为正文。 + */ +const stripOneTrailingWorkflowJsonLine = (raw: string): string => { + if (!raw || typeof raw !== 'string') return '' + const t = raw.trimEnd() + const lastNl = t.lastIndexOf('\n') + const lastLine = (lastNl >= 0 ? t.slice(lastNl + 1) : t).trim() + if (!lastLine.startsWith('{')) return raw + try { + const j = JSON.parse(lastLine) as Record + if (!j || typeof j !== 'object') return raw + const reply = j.reply + if (typeof reply !== 'string') return raw + const head = lastNl >= 0 ? t.slice(0, lastNl).trimEnd() : '' + if (head) return head + return reply + } catch { + return raw + } +} + +const stripTrailingWorkflowJsonLine = (raw: string): string => { + let cur = raw + for (let i = 0; i < 6; i++) { + const next = stripOneTrailingWorkflowJsonLine(cur) + if (next === cur) break + cur = next + } + return cur +} + // 格式化消息(支持简单的Markdown) const formatMessage = (content: string) => { if (!content) return '' - // 简单的换行处理 - return content.replace(/\n/g, '
') + const display = stripTrailingWorkflowJsonLine(content) + return display.replace(/\n/g, '
') } // 格式化时间 diff --git a/frontend/src/components/WorkflowEditor/WorkflowEditor.vue b/frontend/src/components/WorkflowEditor/WorkflowEditor.vue index 0b7e8b2..b4f0b7c 100644 --- a/frontend/src/components/WorkflowEditor/WorkflowEditor.vue +++ b/frontend/src/components/WorkflowEditor/WorkflowEditor.vue @@ -62,6 +62,10 @@ 垂直分布 + + + 修正连线锚点 + @@ -6214,6 +6218,84 @@ const onEdgesChange = (changes: any[]) => { } } +/** 节点中心与尺寸(无测量值时用与自动布局一致的默认宽高) */ +const getNodeLayoutMetrics = (n: Node) => { + const dw = n.dimensions?.width + const dh = n.dimensions?.height + const w = dw && dw > 0 ? dw : 200 + const h = dh && dh > 0 ? dh : 80 + return { + w, + h, + cx: n.position.x + w / 2, + cy: n.position.y + h / 2, + } +} + +/** + * 根据两节点相对位置选择连接点,避免「吸附」总接到上下锚点造成锯齿线。 + * 条件节点 true/false 出口保留,仅修正目标侧锚点。 + */ +const resolveOptimalEdgeHandles = ( + sourceId: string, + targetId: string, + currentSourceHandle?: string | null +): { sourceHandle: string; targetHandle: string } => { + const src = nodes.value.find((node) => node.id === sourceId) + const tgt = nodes.value.find((node) => node.id === targetId) + if (!src || !tgt) { + return { sourceHandle: currentSourceHandle || 'right', targetHandle: 'left' } + } + const a = getNodeLayoutMetrics(src) + const b = getNodeLayoutMetrics(tgt) + const dx = b.cx - a.cx + const dy = b.cy - a.cy + + if (currentSourceHandle === 'true' || currentSourceHandle === 'false') { + if (Math.abs(dx) >= Math.abs(dy)) { + return { sourceHandle: currentSourceHandle, targetHandle: 'left' } + } + return { sourceHandle: currentSourceHandle, targetHandle: 'top' } + } + + const horizontalish = Math.abs(dx) >= Math.abs(dy) * 0.85 + if (horizontalish) { + return { sourceHandle: 'right', targetHandle: 'left' } + } + if (dy > 8) { + return { sourceHandle: 'bottom', targetHandle: 'top' } + } + return { sourceHandle: 'right', targetHandle: 'left' } +} + +/** 批量将已有边的锚点校正为与布局一致的方向 */ +const normalizeAllWorkflowEdgeHandles = (opts?: { silent?: boolean }): number => { + let 
changed = 0 + for (const edge of edges.value) { + const { sourceHandle, targetHandle } = resolveOptimalEdgeHandles( + edge.source, + edge.target, + edge.sourceHandle + ) + if (edge.sourceHandle !== sourceHandle || edge.targetHandle !== targetHandle) { + updateEdge(edge.id, { sourceHandle, targetHandle }) + edge.sourceHandle = sourceHandle + edge.targetHandle = targetHandle + changed++ + } + } + if (changed > 0) { + hasChanges.value = true + pushHistory('normalize edge handles') + if (!opts?.silent) { + ElMessage.success(`已修正 ${changed} 条连线的锚点`) + } + } else if (!opts?.silent) { + ElMessage.info('连线锚点已在最佳方向') + } + return changed +} + // 连接验证函数 - 允许所有方向的连接(包括左右) const isValidConnection = (connection: Connection) => { console.log('验证连接:', connection) @@ -6228,14 +6310,11 @@ const isValidConnection = (connection: Connection) => { return false } - // 检查是否已经存在相同的连接 + // 同一对节点只保留一条边(锚点不同也视为重复,避免重复连线) const existingEdge = edges.value.find( - e => e.source === connection.source && - e.target === connection.target && - (e.sourceHandle === connection.sourceHandle || (!e.sourceHandle && !connection.sourceHandle)) && - (e.targetHandle === connection.targetHandle || (!e.targetHandle && !connection.targetHandle)) + (e) => e.source === connection.source && e.target === connection.target ) - + if (existingEdge) { return false } @@ -6265,19 +6344,22 @@ const onConnect = (connection: Connection) => { return } - // 检查是否已经存在相同的连接 + // 同一对节点只保留一条边 const existingEdge = edges.value.find( - e => e.source === connection.source && - e.target === connection.target && - (e.sourceHandle === connection.sourceHandle || (!e.sourceHandle && !connection.sourceHandle)) && - (e.targetHandle === connection.targetHandle || (!e.targetHandle && !connection.targetHandle)) + (e) => e.source === connection.source && e.target === connection.target ) - + if (existingEdge) { ElMessage.warning('连接已存在') return } - + + const { sourceHandle, targetHandle } = resolveOptimalEdgeHandles( + connection.source, + 
connection.target, + connection.sourceHandle + ) + // 清除选中的边 selectedEdge.value = null @@ -6285,8 +6367,8 @@ const onConnect = (connection: Connection) => { id: `edge_${connection.source}_${connection.target}_${Date.now()}`, source: connection.source, target: connection.target, - sourceHandle: connection.sourceHandle || undefined, - targetHandle: connection.targetHandle || undefined, + sourceHandle, + targetHandle, type: 'bezier', // 使用贝塞尔曲线(平滑曲线) animated: true, selectable: true, @@ -6906,6 +6988,11 @@ const handleClear = () => { // 节点对齐功能 const handleAlignNodes = (command: string) => { + if (command === 'normalize-edge-handles') { + normalizeAllWorkflowEdgeHandles() + return + } + // 获取选中的节点(支持多选) const selectedNodes = nodes.value.filter(node => node.selected) @@ -7054,147 +7141,117 @@ const handleAutoLayout = async () => { return } - // 构建邻接表(有向图) - const graph: Record = {} - const inDegree: Record = {} - - // 初始化 - nodes.value.forEach(node => { - graph[node.id] = [] - inDegree[node.id] = 0 + // 最长路径分层(Sugiyama 简化):rank[v]=1+max(rank[p]),同一 rank 的节点放在同一列,避免旧版「单节点层累加 currentX + 分支后重置 X」造成的重叠与超长飞线 + const nodeIds = nodes.value.map((n) => n.id) + const pred = new Map() + nodeIds.forEach((id) => pred.set(id, [])) + edges.value.forEach((edge) => { + if (!edge.source || !edge.target) return + if (!pred.has(edge.target)) return + pred.get(edge.target)!.push(edge.source) }) - - // 构建图 - edges.value.forEach(edge => { - if (graph[edge.source] && !graph[edge.source].includes(edge.target)) { - graph[edge.source].push(edge.target) - inDegree[edge.target] = (inDegree[edge.target] || 0) + 1 - } + + const rank: Record = {} + nodeIds.forEach((id) => { + const ps = pred.get(id) || [] + if (ps.length === 0) rank[id] = 0 }) - - // 拓扑排序,将节点分层 - const layers: string[][] = [] - const visited = new Set() - const queue: string[] = [] - - // 找到所有入度为0的节点(开始节点) - Object.keys(inDegree).forEach(nodeId => { - if (inDegree[nodeId] === 0) { - queue.push(nodeId) - } - }) - - // 
如果没有入度为0的节点,使用开始节点 - if (queue.length === 0 && startNode) { - queue.push(startNode.id) + if (startNode && rank[startNode.id] === undefined) rank[startNode.id] = 0 + + let relaxIter = 0 + let changed = true + while (changed && relaxIter < nodeIds.length + 12) { + changed = false + relaxIter++ + edges.value.forEach((e) => { + if (rank[e.source] === undefined) return + const next = rank[e.source]! + 1 + if (rank[e.target] === undefined || rank[e.target]! < next) { + rank[e.target] = next + changed = true + } + }) } - - // 分层遍历 - while (queue.length > 0) { - const layer: string[] = [] - const layerSize = queue.length - - for (let i = 0; i < layerSize; i++) { - const nodeId = queue.shift()! - if (visited.has(nodeId)) continue - - visited.add(nodeId) - layer.push(nodeId) - - // 处理该节点的所有出边 - const neighbors = graph[nodeId] || [] - neighbors.forEach(neighborId => { - inDegree[neighborId] = (inDegree[neighborId] || 0) - 1 - if (inDegree[neighborId] === 0 && !visited.has(neighborId)) { - queue.push(neighborId) - } - }) - } - - if (layer.length > 0) { - layers.push(layer) - } - } - - // 处理未访问的节点(可能是孤立节点) - nodes.value.forEach(node => { - if (!visited.has(node.id)) { - if (layers.length === 0) { - layers.push([node.id]) - } else { - layers[layers.length - 1].push(node.id) + let maxR = 0 + nodeIds.forEach((id) => { + if (rank[id] !== undefined) maxR = Math.max(maxR, rank[id]!) + }) + nodeIds.forEach((id) => { + if (rank[id] === undefined) rank[id] = maxR + 1 + }) + maxR = Math.max(maxR, ...nodeIds.map((id) => rank[id]!)) + + const rankSet = new Set() + nodeIds.forEach((id) => rankSet.add(rank[id]!)) + const sortedRanks = [...rankSet].sort((a, b) => a - b) + const rankToCol = new Map(sortedRanks.map((r, i) => [r, i])) + + let layers: string[][] = Array.from({ length: sortedRanks.length }, () => []) + nodeIds.forEach((id) => { + const col = rankToCol.get(rank[id]!) ?? 
0 + layers[col].push(id) + }) + + // 层内排序:重心法(barycenter)减少跨层边交叉,多轮上下扫掠 + const reorderLayersReduceCrossings = (ly: string[][], edgs: typeof edges.value): string[][] => { + if (ly.length <= 1) return ly.map(l => [...l]) + const out = ly.map(l => [...l]) + const pred = new Map() + const succ = new Map() + edgs.forEach((e) => { + if (!e?.source || !e?.target) return + if (!pred.has(e.target)) pred.set(e.target, []) + pred.get(e.target)!.push(e.source) + if (!succ.has(e.source)) succ.set(e.source, []) + succ.get(e.source)!.push(e.target) + }) + const rounds = 8 + for (let it = 0; it < rounds; it++) { + for (let k = 1; k < out.length; k++) { + const prev = out[k - 1] + const pos = new Map(prev.map((id, i) => [id, i])) + out[k].sort((a, b) => { + const pa = (pred.get(a) || []).filter((x) => pos.has(x)) + const pb = (pred.get(b) || []).filter((x) => pos.has(x)) + const ba = pa.length ? pa.reduce((s, x) => s + (pos.get(x) ?? 0), 0) / pa.length : 0 + const bb = pb.length ? pb.reduce((s, x) => s + (pos.get(x) ?? 0), 0) / pb.length : 0 + return ba - bb || a.localeCompare(b) + }) + } + for (let k = out.length - 2; k >= 0; k--) { + const next = out[k + 1] + const pos = new Map(next.map((id, i) => [id, i])) + out[k].sort((a, b) => { + const sa = (succ.get(a) || []).filter((x) => pos.has(x)) + const sb = (succ.get(b) || []).filter((x) => pos.has(x)) + const ba = sa.length ? sa.reduce((s, x) => s + (pos.get(x) ?? 0), 0) / sa.length : 0 + const bb = sb.length ? sb.reduce((s, x) => s + (pos.get(x) ?? 
0), 0) / sb.length : 0 + return ba - bb || a.localeCompare(b) + }) } } - }) + return out + } + layers = reorderLayersReduceCrossings(layers, edges.value) - // 布局参数 + // 布局参数:按列摆开(列 = 拓扑深度),列内垂直堆叠;主线性链呈一行,分支呈一列,避免旧算法横向漂移与飞线 const nodeWidth = 200 const nodeHeight = 80 - const horizontalSpacing = 320 // 水平间距(节点之间的水平距离) - const verticalSpacing = 150 // 垂直间距(用于有分支的情况) - const startX = 100 - const startY = 200 - - // 检查是否是简单的线性工作流(每层只有一个节点) - let isLinearWorkflow = true - layers.forEach(layer => { - if (layer.length > 1) { - isLinearWorkflow = false - } - }) - - // 如果每层只有一个节点,使用水平线性布局(从左到右,所有节点在同一水平线) - if (isLinearWorkflow && layers.length > 1) { - // 水平线性布局:所有节点水平排列在同一水平线上 - layers.forEach((layer, layerIndex) => { - layer.forEach((nodeId) => { - const nodeX = startX + layerIndex * horizontalSpacing - const nodeY = startY // 所有节点在同一水平线上 - updateNode(nodeId, { - position: { x: nodeX, y: nodeY } - }) + const horizontalSpacing = 300 + const verticalSpacing = 130 + const startX = 80 + const startY = 220 + + layers.forEach((layer, colIndex) => { + const nodeX = startX + colIndex * horizontalSpacing + const n = layer.length + const y0 = startY - ((n - 1) * verticalSpacing) / 2 + layer.forEach((nodeId, j) => { + updateNode(nodeId, { + position: { x: nodeX, y: y0 + j * verticalSpacing } }) }) - } else { - // 层次布局:有分支的工作流 - // 优化策略:尽量让单节点层水平排列,多节点层才垂直排列 - let baseY = startY - let currentX = startX - let consecutiveSingleNodeLayers = 0 - - layers.forEach((layer, layerIndex) => { - if (layer.length === 1) { - // 单节点层:水平排列 - consecutiveSingleNodeLayers++ - const nodeId = layer[0] - const nodeX = currentX - // 如果连续多个单节点层,保持水平对齐 - const nodeY = baseY + (consecutiveSingleNodeLayers > 3 ? 
20 : 0) // 如果连续太多,稍微下移 - updateNode(nodeId, { - position: { x: nodeX, y: nodeY } - }) - currentX += horizontalSpacing - } else { - // 多节点层:水平居中排列,使用新的Y坐标 - consecutiveSingleNodeLayers = 0 - baseY += verticalSpacing - currentX = startX // 重置X位置 - - const layerWidth = (layer.length - 1) * horizontalSpacing - const layerStartX = startX - - layer.forEach((nodeId, nodeIndex) => { - const nodeX = layerStartX + nodeIndex * horizontalSpacing - updateNode(nodeId, { - position: { x: nodeX, y: baseY } - }) - }) - - // 更新currentX为下一层的起始位置 - currentX = layerStartX + layerWidth + horizontalSpacing - } - }) - } + }) // 自动调整视口,使所有节点可见 await nextTick() @@ -7233,7 +7290,12 @@ const handleAutoLayout = async () => { } }, 100) - ElMessage.success(`自动布局完成,共 ${layers.length} 层,${nodes.value.length} 个节点`) + await nextTick() + const fixedHandles = normalizeAllWorkflowEdgeHandles({ silent: true }) + ElMessage.success( + `自动布局完成:${layers.length} 列(最长路径分层)+ 层内排序减交叉` + + (fixedHandles ? `,已优化 ${fixedHandles} 条连线锚点` : '') + ) hasChanges.value = true } diff --git a/frontend/src/utils/agentSkills.ts b/frontend/src/utils/agentSkills.ts new file mode 100644 index 0000000..2bb06e8 --- /dev/null +++ b/frontend/src/utils/agentSkills.ts @@ -0,0 +1,76 @@ +/** + * 从 Agent 工作流中提取 / 写入 LLM 节点的工具(skills)配置。 + */ +import type { WorkflowNode } from '@/types' + +/** 与后端 tools_bootstrap 内置工具一致,用于列表展示与配置勾选 */ +export const BUILTIN_SKILL_OPTIONS: { name: string; label: string }[] = [ + { name: 'http_request', label: 'HTTP 请求' }, + { name: 'file_read', label: '读文件' }, + { name: 'file_write', label: '写文件' }, + { name: 'text_analyze', label: '文本分析' }, + { name: 'datetime', label: '日期时间' }, + { name: 'math_calculate', label: '数学计算' }, + { name: 'system_info', label: '系统信息' }, + { name: 'json_process', label: 'JSON 处理' }, + { name: 'database_query', label: '数据库查询' }, + { name: 'adb_log', label: 'ADB 日志' } +] + +export const BUILTIN_SKILL_LABELS: Record = Object.fromEntries( + BUILTIN_SKILL_OPTIONS.map((o) => [o.name, 
o.label]) +) + +function isLlmNode(n: WorkflowNode): boolean { + const t = (n.type || '').toLowerCase() + const dt = (n.data?.type || '').toLowerCase() + return t === 'llm' || t === 'template' || dt === 'llm' +} + +/** 工作流中所有 LLM 节点上已选工具的并集(去重排序) */ +export function extractSkillToolNames(workflow_config: { nodes?: WorkflowNode[] } | null | undefined): string[] { + const set = new Set() + for (const n of workflow_config?.nodes || []) { + if (!isLlmNode(n)) continue + const raw = n.data?.tools ?? n.data?.selected_tools + if (Array.isArray(raw)) { + raw.forEach((x) => set.add(String(x))) + } + } + return Array.from(set).sort() +} + +/** + * 作为「能力配置」写入时的目标节点:优先 llm-unified,否则第一个带工具的 LLM,否则第一个 LLM。 + */ +export function findPrimaryLlmNodeForTools(nodes: WorkflowNode[] | undefined): WorkflowNode | null { + if (!nodes?.length) return null + const byId = nodes.find((n) => n.id === 'llm-unified' && isLlmNode(n)) + if (byId) return byId + const withTools = nodes.find( + (n) => + isLlmNode(n) && + (n.data?.enable_tools === true || + (Array.isArray(n.data?.tools) && n.data.tools.length > 0) || + (Array.isArray(n.data?.selected_tools) && n.data.selected_tools.length > 0)) + ) + if (withTools) return withTools + return nodes.find((n) => isLlmNode(n)) || null +} + +export function patchWorkflowSkillTools( + workflow_config: { nodes: WorkflowNode[]; edges: unknown[] }, + toolNames: string[] +): { nodes: WorkflowNode[]; edges: unknown[] } { + const wf = JSON.parse(JSON.stringify(workflow_config)) as { nodes: WorkflowNode[]; edges: unknown[] } + const target = findPrimaryLlmNodeForTools(wf.nodes) + if (!target) return wf + + const names = [...new Set(toolNames)].filter(Boolean).sort() + if (!target.data) target.data = {} + target.data.enable_tools = names.length > 0 + target.data.tools = names + target.data.selected_tools = names + + return wf +} diff --git a/frontend/src/views/Agents.vue b/frontend/src/views/Agents.vue index d74e22c..689e826 100644 --- 
a/frontend/src/views/Agents.vue +++ b/frontend/src/views/Agents.vue @@ -62,7 +62,23 @@ stripe > - + + + + - + + + + +
+ + + +
Agent:{{ skillAgentName }}
+ + + + + {{ opt.label }} + ({{ opt.name }}) + + + + +
+ + {{ ex }} + + (来自当前工作流,可关闭以移除) +
+
+
+
+ +
@@ -262,10 +341,18 @@ import { Upload, Download, UploadFilled, - ChatDotRound + ChatDotRound, + Tools } from '@element-plus/icons-vue' import { useAgentStore } from '@/stores/agent' import type { Agent } from '@/stores/agent' +import { + BUILTIN_SKILL_OPTIONS, + BUILTIN_SKILL_LABELS, + extractSkillToolNames, + patchWorkflowSkillTools, + findPrimaryLlmNodeForTools +} from '@/utils/agentSkills' const router = useRouter() const agentStore = useAgentStore() @@ -312,6 +399,80 @@ const importFileContent = ref(null) const importing = ref(false) const uploadRef = ref() +// 能力 / 技能配置 +const skillDialogVisible = ref(false) +const skillLoading = ref(false) +const skillSaving = ref(false) +const skillAgentId = ref(null) +const skillAgentName = ref('') +const skillBuiltinSelected = ref([]) +const skillExtraNames = ref([]) +const skillBuiltinOptions = BUILTIN_SKILL_OPTIONS +const BUILTIN_NAME_SET = new Set(BUILTIN_SKILL_OPTIONS.map((o) => o.name)) + +function skillLabel(name: string): string { + return BUILTIN_SKILL_LABELS[name] || name +} + +function skillTagsForRow(agent: Agent): string[] { + return extractSkillToolNames(agent.workflow_config) +} + +function handleSkillDialogClose() { + skillAgentId.value = null + skillAgentName.value = '' + skillBuiltinSelected.value = [] + skillExtraNames.value = [] +} + +async function handleSkillConfig(agent: Agent) { + skillAgentId.value = agent.id + skillAgentName.value = agent.name + skillDialogVisible.value = true + skillLoading.value = true + try { + const full = await agentStore.fetchAgent(agent.id) + const all = extractSkillToolNames(full.workflow_config) + skillBuiltinSelected.value = all.filter((n) => BUILTIN_NAME_SET.has(n)) + skillExtraNames.value = all.filter((n) => !BUILTIN_NAME_SET.has(n)) + const primary = findPrimaryLlmNodeForTools(full.workflow_config?.nodes) + if (!primary) { + ElMessage.warning('当前工作流中未找到 LLM 节点,请先在「设计」中添加 LLM 节点后再配置能力。') + } + } catch (e: any) { + ElMessage.error(e.response?.data?.detail || '加载 Agent 
失败') + skillDialogVisible.value = false + } finally { + skillLoading.value = false + } +} + +function removeExtraSkill(name: string) { + skillExtraNames.value = skillExtraNames.value.filter((x) => x !== name) +} + +async function handleSkillSave() { + if (!skillAgentId.value) return + skillSaving.value = true + try { + const full = await agentStore.fetchAgent(skillAgentId.value) + const merged = [ + ...new Set([...skillBuiltinSelected.value, ...skillExtraNames.value].filter(Boolean)) + ].sort() + const wf = patchWorkflowSkillTools(full.workflow_config, merged) + await agentStore.updateAgent(skillAgentId.value, { + workflow_config: wf + }) + ElMessage.success('能力配置已保存') + skillDialogVisible.value = false + await loadAgents() + } catch (e: any) { + ElMessage.error(e.response?.data?.detail || '保存失败') + } finally { + skillSaving.value = false + } +} + // 表单验证规则 const rules = { name: [ @@ -662,4 +823,59 @@ onMounted(() => { display: flex; justify-content: flex-end; } + +.skill-tags { + display: flex; + flex-wrap: wrap; + gap: 4px; + align-items: center; + max-width: 100%; +} + +.skill-tag { + margin: 0; +} + +.skill-empty { + color: var(--el-text-color-placeholder); + font-size: 13px; +} + +.skill-alert { + margin-bottom: 8px; +} + +.skill-agent-name { + font-weight: 500; + margin-bottom: 4px; +} + +.skill-checkbox-group { + display: flex; + flex-direction: column; + gap: 8px; + align-items: flex-start; +} + +.skill-name { + color: var(--el-text-color-secondary); + font-size: 12px; + margin-left: 4px; +} + +.skill-extra-wrap { + display: flex; + flex-wrap: wrap; + gap: 6px; + align-items: center; +} + +.skill-extra-tag { + margin: 0; +} + +.skill-extra-hint { + font-size: 12px; + color: var(--el-text-color-secondary); +} diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index b1c72c6..471c195 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -11,7 +11,7 @@ export default defineConfig({ } }, server: { - port: 3000, + port: 3001, proxy: { 
'/api': { target: 'http://localhost:8037', diff --git a/start_windows.cmd b/start_windows.cmd new file mode 100644 index 0000000..4d9a384 --- /dev/null +++ b/start_windows.cmd @@ -0,0 +1,219 @@ +@echo off +echo ============================================== +echo 低代码智能体平台 - Windows 启动脚本 +echo ============================================== +echo. + +REM 检查Python是否安装 +python --version >nul 2>&1 +if errorlevel 1 ( + echo ❌ Python 未安装或未添加到系统PATH + echo 请安装 Python 3.11+ 并确保在PATH中 + pause + exit /b 1 +) + +REM 检查Node.js是否安装 +node --version >nul 2>&1 +if errorlevel 1 ( + echo ❌ Node.js 未安装或未添加到系统PATH + echo 请安装 Node.js 18+ 并确保在PATH中 + pause + exit /b 1 +) + +REM 检查pnpm是否安装 +pnpm --version >nul 2>&1 +if errorlevel 1 ( + echo ⚠️ pnpm 未安装,正在安装... + npm install -g pnpm + if errorlevel 1 ( + echo ❌ pnpm 安装失败 + pause + exit /b 1 + ) +) + +echo ✅ 环境检查通过 +echo. + +REM 进入项目目录 +cd /d "%~dp0" + +echo ============================================== +echo 1. Redis 检查 +echo ============================================== +echo. + +REM 检查Redis服务是否运行 +sc query Redis >nul 2>&1 +if errorlevel 1 ( + echo ❌ Redis 服务未运行 + echo. + echo 请按以下步骤安装Redis: + echo 1. 下载 Redis Windows 版本:https://github.com/microsoftarchive/redis/releases + echo 2. 下载 Redis-x64-3.2.100.msi + echo 3. 运行安装程序,按照默认设置安装 + echo 4. Redis 将作为 Windows 服务运行在 6379 端口 + echo. + echo 安装完成后,请重新运行此脚本 + pause + exit /b 1 +) else ( + echo ✅ Redis 服务正在运行 +) + +echo ============================================== +echo 2. 启动后端服务 +echo ============================================== +echo. + +REM 进入backend目录 +cd backend + +REM 检查虚拟环境 +if not exist "venv\Scripts\activate" ( + echo ⚠️ 虚拟环境不存在,正在创建... + python -m venv venv + if errorlevel 1 ( + echo ❌ 虚拟环境创建失败 + pause + exit /b 1 + ) +) + +echo ✅ 虚拟环境检查通过 + +REM 激活虚拟环境并安装依赖 +call venv\Scripts\activate + +echo 📦 检查Python依赖... +pip list | findstr "fastapi" >nul +if errorlevel 1 ( + echo ⚠️ 正在安装Python依赖... 
+ pip install -r requirements.txt + if errorlevel 1 ( + echo ❌ Python依赖安装失败 + pause + exit /b 1 + ) + echo ✅ Python依赖安装完成 +) else ( + echo ✅ Python依赖已安装 +) + +echo. + +echo 🔧 配置环境变量... +if not exist ".env" ( + copy env.example .env >nul + echo ⚠️ 已创建 .env 文件,请检查配置 +) + +echo. + +echo 🗄️ 运行数据库迁移... +alembic upgrade head +if errorlevel 1 ( + echo ⚠️ 数据库迁移失败,继续启动... +) + +echo. + +echo 🌐 启动后端服务... +echo 后端服务将在 http://localhost:8037 启动 +echo API文档:http://localhost:8037/docs +echo. + +start cmd /k "uvicorn app.main:app --host 0.0.0.0 --port 8037 --reload" + +echo ⏳ 等待后端服务启动... +timeout /t 3 /nobreak >nul + +echo. + +echo ============================================== +echo 3. 启动 Celery Worker +echo ============================================== +echo. + +echo 🔄 启动 Celery Worker... +start cmd /k "celery -A app.core.celery_app worker --loglevel=info" + +echo ⏳ 等待 Celery Worker 启动... +timeout /t 2 /nobreak >nul + +echo. + +echo ============================================== +echo 4. 启动前端服务 +echo ============================================== +echo. + +REM 返回项目根目录 +cd .. + +REM 进入frontend目录 +cd frontend + +echo 📦 检查前端依赖... +if not exist "node_modules" ( + echo ⚠️ 正在安装前端依赖... + pnpm install + if errorlevel 1 ( + echo ❌ 前端依赖安装失败 + pause + exit /b 1 + ) + echo ✅ 前端依赖安装完成 +) else ( + echo ✅ 前端依赖已安装 +) + +echo. + +echo 🖥️ 启动前端服务... +echo 前端服务将在 http://localhost:3000 启动 +echo. + +start cmd /k "pnpm dev" + +echo ⏳ 等待前端服务启动... +timeout /t 5 /nobreak >nul + +echo. + +echo ============================================== +echo 🎉 启动完成! +echo ============================================== +echo. +echo 服务访问地址: +echo 📍 前端界面: http://localhost:3000 +echo 📍 后端API: http://localhost:8037 +echo 📍 API文档: http://localhost:8037/docs +echo. +echo 服务状态: +echo ✅ Redis 服务: 运行中 +echo ✅ 后端服务: 已启动 +echo ✅ Celery Worker: 已启动 +echo ✅ 前端服务: 已启动 +echo. +echo 📋 重要提示: +echo 1. 首次访问需要注册新用户 +echo 2. 保持所有命令行窗口打开 +echo 3. 停止服务:关闭所有命令行窗口 +echo. +echo ============================================== +echo. 
+ +REM 返回项目根目录 +cd .. + +echo 按任意键打开浏览器访问前端界面... +pause >nul +start http://localhost:3000 + +echo. +echo 脚本执行完成! +echo 按任意键退出... +pause >nul \ No newline at end of file diff --git a/start_windows.ps1 b/start_windows.ps1 new file mode 100644 index 0000000..b511c5b --- /dev/null +++ b/start_windows.ps1 @@ -0,0 +1,239 @@ +# 低代码智能体平台 - Windows PowerShell 启动脚本 + +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "低代码智能体平台 - Windows 启动脚本" -ForegroundColor Cyan +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +# 检查Python是否安装 +try { + $pythonVersion = python --version 2>&1 + Write-Host "✅ Python 版本: $pythonVersion" -ForegroundColor Green +} catch { + Write-Host "❌ Python 未安装或未添加到系统PATH" -ForegroundColor Red + Write-Host "请安装 Python 3.11+ 并确保在PATH中" + pause + exit 1 +} + +# 检查Node.js是否安装 +try { + $nodeVersion = node --version + Write-Host "✅ Node.js 版本: $nodeVersion" -ForegroundColor Green +} catch { + Write-Host "❌ Node.js 未安装或未添加到系统PATH" -ForegroundColor Red + Write-Host "请安装 Node.js 18+ 并确保在PATH中" + pause + exit 1 +} + +# 检查pnpm是否安装 +try { + $pnpmVersion = pnpm --version + Write-Host "✅ pnpm 版本: $pnpmVersion" -ForegroundColor Green +} catch { + Write-Host "⚠️ pnpm 未安装,正在安装..." -ForegroundColor Yellow + npm install -g pnpm + if ($LASTEXITCODE -ne 0) { + Write-Host "❌ pnpm 安装失败" -ForegroundColor Red + pause + exit 1 + } + Write-Host "✅ pnpm 安装成功" -ForegroundColor Green +} + +Write-Host "✅ 环境检查通过" -ForegroundColor Green +Write-Host "" + +# 设置项目目录 +$projectRoot = Split-Path -Parent $MyInvocation.MyCommand.Path +Set-Location $projectRoot + +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "1. 
Redis 检查" -ForegroundColor Cyan +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +# 检查Redis服务是否运行 +$redisService = Get-Service -Name "Redis" -ErrorAction SilentlyContinue +if ($null -eq $redisService -or $redisService.Status -ne "Running") { + Write-Host "❌ Redis 服务未运行" -ForegroundColor Red + Write-Host "" + Write-Host "请按以下步骤安装Redis:" -ForegroundColor Yellow + Write-Host "1. 下载 Redis Windows 版本:https://github.com/microsoftarchive/redis/releases" + Write-Host "2. 下载 Redis-x64-3.2.100.msi" + Write-Host "3. 运行安装程序,按照默认设置安装" + Write-Host "4. Redis 将作为 Windows 服务运行在 6379 端口" + Write-Host "" + Write-Host "安装完成后,请重新运行此脚本" -ForegroundColor Yellow + pause + exit 1 +} else { + Write-Host "✅ Redis 服务正在运行 (状态: $($redisService.Status))" -ForegroundColor Green +} + +Write-Host "" +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "2. 启动后端服务" -ForegroundColor Cyan +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +# 进入backend目录 +Set-Location "$projectRoot\backend" + +# 检查虚拟环境 +if (-not (Test-Path "venv\Scripts\activate")) { + Write-Host "⚠️ 虚拟环境不存在,正在创建..." -ForegroundColor Yellow + python -m venv venv + if ($LASTEXITCODE -ne 0) { + Write-Host "❌ 虚拟环境创建失败" -ForegroundColor Red + pause + exit 1 + } + Write-Host "✅ 虚拟环境创建成功" -ForegroundColor Green +} + +Write-Host "✅ 虚拟环境检查通过" -ForegroundColor Green + +# 激活虚拟环境 +$activateScript = "$projectRoot\backend\venv\Scripts\Activate.ps1" +if (Test-Path $activateScript) { + & $activateScript +} else { + Write-Host "❌ 虚拟环境激活脚本不存在: $activateScript" -ForegroundColor Red + pause + exit 1 +} + +Write-Host "📦 检查Python依赖..." -ForegroundColor Cyan +$fastapiInstalled = pip list | Select-String "fastapi" +if (-not $fastapiInstalled) { + Write-Host "⚠️ 正在安装Python依赖..." 
-ForegroundColor Yellow + pip install -r requirements.txt + if ($LASTEXITCODE -ne 0) { + Write-Host "❌ Python依赖安装失败" -ForegroundColor Red + pause + exit 1 + } + Write-Host "✅ Python依赖安装完成" -ForegroundColor Green +} else { + Write-Host "✅ Python依赖已安装" -ForegroundColor Green +} + +Write-Host "" + +Write-Host "🔧 配置环境变量..." -ForegroundColor Cyan +if (-not (Test-Path ".env")) { + Copy-Item env.example .env -ErrorAction SilentlyContinue + Write-Host "⚠️ 已创建 .env 文件,请检查配置" -ForegroundColor Yellow +} + +Write-Host "" + +Write-Host "🗄️ 运行数据库迁移..." -ForegroundColor Cyan +alembic upgrade head +if ($LASTEXITCODE -ne 0) { + Write-Host "⚠️ 数据库迁移失败,继续启动..." -ForegroundColor Yellow +} + +Write-Host "" + +Write-Host "🌐 启动后端服务..." -ForegroundColor Cyan +Write-Host "后端服务将在 http://localhost:8037 启动" -ForegroundColor Green +Write-Host "API文档:http://localhost:8037/docs" -ForegroundColor Green +Write-Host "" + +# 启动后端服务(新窗口) +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd '$projectRoot\backend'; .\venv\Scripts\Activate.ps1; uvicorn app.main:app --host 0.0.0.0 --port 8037 --reload" + +Write-Host "⏳ 等待后端服务启动..." -ForegroundColor Cyan +Start-Sleep -Seconds 3 + +Write-Host "" +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "3. 启动 Celery Worker" -ForegroundColor Cyan +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +Write-Host "🔄 启动 Celery Worker..." -ForegroundColor Cyan +# Windows 下 prefork 池易卡住任务(Redis 出现大量 unacked),使用线程池更稳定 +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd '$projectRoot\backend'; .\venv\Scripts\Activate.ps1; celery -A app.core.celery_app worker --loglevel=info --pool=threads --concurrency=8" + +Write-Host "⏳ 等待 Celery Worker 启动..." -ForegroundColor Cyan +Start-Sleep -Seconds 2 + +Write-Host "" +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "4. 
启动前端服务" -ForegroundColor Cyan +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +# 返回项目根目录 +Set-Location $projectRoot + +# 进入frontend目录 +Set-Location "$projectRoot\frontend" + +Write-Host "📦 检查前端依赖..." -ForegroundColor Cyan +if (-not (Test-Path "node_modules")) { + Write-Host "⚠️ 正在安装前端依赖..." -ForegroundColor Yellow + pnpm install + if ($LASTEXITCODE -ne 0) { + Write-Host "❌ 前端依赖安装失败" -ForegroundColor Red + pause + exit 1 + } + Write-Host "✅ 前端依赖安装完成" -ForegroundColor Green +} else { + Write-Host "✅ 前端依赖已安装" -ForegroundColor Green +} + +Write-Host "" + +Write-Host "🖥️ 启动前端服务..." -ForegroundColor Cyan +Write-Host "前端服务将在 http://localhost:3000 启动" -ForegroundColor Green +Write-Host "" + +# 启动前端服务(新窗口) +Start-Process powershell -ArgumentList "-NoExit", "-Command", "cd '$projectRoot\frontend'; pnpm dev" + +Write-Host "⏳ 等待前端服务启动..." -ForegroundColor Cyan +Start-Sleep -Seconds 5 + +Write-Host "" +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "🎉 启动完成!" -ForegroundColor Green +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" +Write-Host "服务访问地址:" -ForegroundColor White +Write-Host " 📍 前端界面: http://localhost:3000" -ForegroundColor Yellow +Write-Host " 📍 后端API: http://localhost:8037" -ForegroundColor Yellow +Write-Host " 📍 API文档: http://localhost:8037/docs" -ForegroundColor Yellow +Write-Host "" +Write-Host "服务状态:" -ForegroundColor White +Write-Host " ✅ Redis 服务: 运行中" -ForegroundColor Green +Write-Host " ✅ 后端服务: 已启动" -ForegroundColor Green +Write-Host " ✅ Celery Worker: 已启动" -ForegroundColor Green +Write-Host " ✅ 前端服务: 已启动" -ForegroundColor Green +Write-Host "" +Write-Host "📋 重要提示:" -ForegroundColor White +Write-Host " 1. 首次访问需要注册新用户" -ForegroundColor Gray +Write-Host " 2. 保持所有PowerShell窗口打开" -ForegroundColor Gray +Write-Host " 3. 
停止服务:关闭所有PowerShell窗口" -ForegroundColor Gray +Write-Host "" +Write-Host "==============================================" -ForegroundColor Cyan +Write-Host "" + +# 返回项目根目录 +Set-Location $projectRoot + +Write-Host "是否要打开浏览器访问前端界面?(Y/N)" -ForegroundColor Cyan +$response = Read-Host +if ($response -eq "Y" -or $response -eq "y") { + Start-Process "http://localhost:3000" +} + +Write-Host "" +Write-Host "脚本执行完成!" -ForegroundColor Green +Write-Host "按任意键退出..." -ForegroundColor Gray +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") \ No newline at end of file diff --git a/test_zhini_kefu_6.py b/test_zhini_kefu_6.py new file mode 100644 index 0000000..ce68578 --- /dev/null +++ b/test_zhini_kefu_6.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +测试「知你客服6号」Agent:登录 -> 创建执行 -> 轮询直到结束。 + +用法: + python test_zhini_kefu_6.py + python test_zhini_kefu_6.py --base-url http://127.0.0.1:8037 + set PLATFORM_BASE_URL=... && python test_zhini_kefu_6.py + +依赖: requests(与项目其他测试脚本一致) +""" +from __future__ import annotations + +import argparse +import json +import os +import sys +import time + +import requests + +# 知你客服6号(本地平台 Agent 管理中的名称对应 ID,若你环境不同请改此处或传 --agent-id) +DEFAULT_AGENT_ID = "2acc84d5-814b-4d61-9703-94a4b117375f" +DEFAULT_MESSAGE = "你好" + + +def main() -> int: + if sys.platform == "win32" and hasattr(sys.stdout, "reconfigure"): + try: + sys.stdout.reconfigure(encoding="utf-8") + sys.stderr.reconfigure(encoding="utf-8") + except Exception: + pass + + parser = argparse.ArgumentParser(description="测试知你客服6号,发送一条用户消息并打印结果") + parser.add_argument( + "--base-url", + default=os.getenv("PLATFORM_BASE_URL", "http://127.0.0.1:8037"), + help="平台 API 根地址(默认 http://127.0.0.1:8037)", + ) + parser.add_argument("--username", default=os.getenv("PLATFORM_USERNAME", "admin")) + parser.add_argument("--password", default=os.getenv("PLATFORM_PASSWORD", "123456")) + parser.add_argument("--agent-id", default=os.getenv("ZHINI_6_AGENT_ID", DEFAULT_AGENT_ID)) + parser.add_argument("--message", 
"-m", default=DEFAULT_MESSAGE, help="用户消息,默认:你好") + parser.add_argument( + "--user-id", + default="script_test_zhini6", + help="多轮记忆隔离用 user_id(可选)", + ) + parser.add_argument("--timeout", type=int, default=180, help="轮询最长秒数") + parser.add_argument("--poll", type=float, default=0.8, help="轮询间隔秒") + args = parser.parse_args() + + base = args.base_url.rstrip("/") + + print("=" * 60) + print("知你客服6号 执行测试") + print(" base_url :", base) + print(" agent_id :", args.agent_id) + print(" message :", args.message) + print("=" * 60) + + # 1. 登录 + r = requests.post( + f"{base}/api/v1/auth/login", + data={"username": args.username, "password": args.password}, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + timeout=15, + ) + if r.status_code != 200: + print("登录失败:", r.status_code, r.text[:500], file=sys.stderr) + return 1 + token = r.json().get("access_token") + if not token: + print("登录响应无 access_token", file=sys.stderr) + return 1 + + headers = { + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + } + + msg = args.message + body = { + "agent_id": args.agent_id, + "input_data": { + "query": msg, + "USER_INPUT": msg, + "user_id": args.user_id, + }, + } + + # 2. 创建执行 + r2 = requests.post(f"{base}/api/v1/executions", headers=headers, json=body, timeout=30) + if r2.status_code != 201: + print("创建执行失败:", r2.status_code, r2.text[:1000], file=sys.stderr) + return 1 + + ex = r2.json() + eid = ex["id"] + print("已创建执行:", eid) + print("初始状态:", ex.get("status")) + + # 3. 
轮询 + deadline = time.time() + args.timeout + status = ex.get("status", "pending") + while status in ("pending", "running") and time.time() < deadline: + time.sleep(args.poll) + rs = requests.get(f"{base}/api/v1/executions/{eid}", headers=headers, timeout=60) + if rs.status_code != 200: + print("查询执行失败:", rs.status_code, rs.text[:500], file=sys.stderr) + return 1 + detail = rs.json() + status = detail.get("status", status) + print(" ...", status) + + final = requests.get(f"{base}/api/v1/executions/{eid}", headers=headers, timeout=60).json() + status = final.get("status") + print() + print("最终状态:", status) + if final.get("error_message"): + print("错误信息:", final["error_message"]) + od = final.get("output_data") + if od is not None: + print("output_data:") + print(json.dumps(od, ensure_ascii=False, indent=2)) + else: + print("output_data: null") + + return 0 if status == "completed" else 2 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/user_data/.gitkeep b/user_data/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/user_data/aaa.md b/user_data/aaa.md new file mode 100644 index 0000000..dca4615 --- /dev/null +++ b/user_data/aaa.md @@ -0,0 +1,3 @@ +# aaa + +written by test_write_user_data_aaa_md.py at 2026-04-07T03:30:57.611757+00:00 diff --git a/user_data/abb.md b/user_data/abb.md new file mode 100644 index 0000000..c759294 --- /dev/null +++ b/user_data/abb.md @@ -0,0 +1 @@ +# abb diff --git a/user_data/abc.md b/user_data/abc.md new file mode 100644 index 0000000..4def314 --- /dev/null +++ b/user_data/abc.md @@ -0,0 +1 @@ +# abc diff --git a/user_data/bbb.md b/user_data/bbb.md new file mode 100644 index 0000000..78506bb --- /dev/null +++ b/user_data/bbb.md @@ -0,0 +1 @@ +# bbb diff --git a/user_data/bbbb.md b/user_data/bbbb.md new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/user_data/bbbb.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/user_data/ccc.md b/user_data/ccc.md new file mode 100644 index 
0000000..0519ecb --- /dev/null +++ b/user_data/ccc.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/user_data/xxx.md b/user_data/xxx.md new file mode 100644 index 0000000..c2f67c5 --- /dev/null +++ b/user_data/xxx.md @@ -0,0 +1 @@ +# test from script diff --git a/user_profile.json b/user_profile.json new file mode 100644 index 0000000..a38453c --- /dev/null +++ b/user_profile.json @@ -0,0 +1 @@ +{"name": "小七"} \ No newline at end of file diff --git a/上传git仓.md b/上传git仓.md new file mode 100644 index 0000000..e5c37ba --- /dev/null +++ b/上传git仓.md @@ -0,0 +1,2 @@ +将修改上传到git仓rjb_win_dev分支 +http://101.43.95.130:3001/admin/aiagent.git \ No newline at end of file diff --git a/望庐山瀑布.md b/望庐山瀑布.md new file mode 100644 index 0000000..446d9ab --- /dev/null +++ b/望庐山瀑布.md @@ -0,0 +1,30 @@ +# 望庐山瀑布 +**作者:李白(唐)** +日照香炉生紫烟, +遥看瀑布挂前川。 +飞流直下三千尺, +疑是银河落九天。 +--- +## 诗歌赏析 +### 创作背景 +《望庐山瀑布》是唐代伟大诗人李白创作的一首七言绝句,描绘了庐山瀑布的壮丽景色,展现了诗人豪放不羁的个性和对大自然的热爱。 +### 诗句解析 +1. **"日照香炉生紫烟"**:阳光照耀下的香炉峰升起紫色的烟雾,为瀑布营造了神秘的氛围。 +2. **"遥看瀑布挂前川"**:远远望去,瀑布像一条白练悬挂在山川之间。 +3. **"飞流直下三千尺"**:瀑布从高处飞泻而下,气势磅礴,夸张手法突显其雄伟。 +4. **"疑是银河落九天"**:诗人展开想象,怀疑这是天上的银河落到了人间,极富浪漫主义色彩。 +### 艺术特色 +- **夸张手法**:"三千尺"的夸张描写增强了瀑布的雄伟气势 +- **比喻精妙**:将瀑布比作银河,形象生动 +- **意境开阔**:从近景到远景,从现实到想象,意境层层递进 +- **语言简练**:短短四句,却描绘出完整的画面 +### 历史评价 +这首诗被誉为描写瀑布的千古绝唱,充分展现了李白诗歌的浪漫主义风格和超凡的想象力,成为中国古代山水诗的典范之作。 +--- +## 扩展阅读 +### 庐山简介 +庐山位于江西省九江市,是中国著名的风景名胜区和避暑胜地,以雄、奇、险、秀闻名于世,素有"匡庐奇秀甲天下"之美誉。 +### 李白与庐山 +李白曾多次游览庐山,留下了多首描写庐山的诗篇。《望庐山瀑布》是其最著名的庐山诗作之一,充分展现了他对自然山水的热爱和独特的艺术视角。 +### 相关诗作 +1. **《庐山谣寄卢侍御 \ No newline at end of file diff --git a/知你客服14号能力文档.md b/知你客服14号能力文档.md new file mode 100644 index 0000000..b28a4d9 --- /dev/null +++ b/知你客服14号能力文档.md @@ -0,0 +1,68 @@ +# 知你客服 14 号 · 能力说明 + +## 1. 定位 + +**知你客服 14 号**在 **知你客服 13 号** 工作流与提示词基础上演进:保留 13 号的连线整理、工具纪律与单行 JSON 输出约定,并将 LLM 可用工具扩展为平台当前注册的**全量内置工具**(与 `app.core.tools_bootstrap` 一致),用于需要 **HTTP、本地文件、系统信息、文本/时间/数学/JSON、只读数据库、Android 日志** 等能力的客服场景。 + +--- + +## 2. 
版本关系 + +| 项目 | 说明 | +|------|------| +| 上游模板 | 默认由脚本自 **知你客服 13 号** 复制(可通过环境变量指定其他源 Agent) | +| 画布 | 与 13 号创建脚本一致:去自环、合并重复边、统一左右锚点、分层布局 | +| 记忆与会话 | 与 13/12 相同,依赖工作流 **Cache(`user_memory_*`)**、Redis 与可选 MySQL 持久化;详见 `agent记忆实现方案.md` | + +--- + +## 3. 内置工具一览(10 项) + +以下工具在 **「llm-unified」** 节点中 **`enable_tools`**,名称需与后端注册一致。 + +| 工具名 | 能力摘要 | +|--------|----------| +| **http_request** | HTTP(S) 请求:抓取网页、调用公开 API;按返回 JSON 的 `body` 等提炼信息。 | +| **file_read** | 读取工作区内的本地文件(相对路径或落在允许根目录下的绝对路径)。 | +| **file_write** | 写入/追加工作区内文件;受大小与路径校验约束。 | +| **system_info** | 系统与运行环境信息;含 **本地文件工作区根路径** `local_file_workspace_root`(回答「工作区在哪」等)。 | +| **text_analyze** | 文本分析:`text` + `operation`:`count`(字数/行数等)、`keywords`(简单词频)、`summary`(截取前几段作摘要)。 | +| **datetime** | 日期时间:常用 `operation=now`,可选 `format`(strftime)。 | +| **math_calculate** | 安全算术表达式计算(如四则运算、`sqrt` 等),结果以工具返回为准,禁止臆造。 | +| **json_process** | JSON:`operation` 为 `parse` / `stringify` / `validate`。 | +| **database_query** | **只读 SQL**:**仅允许 SELECT**;默认连平台库,可选 `data_source_id` 指定数据源;注意 `timeout`。 | +| **adb_log** | Android 设备日志:需运行环境已安装 **adb** 且设备可用;按工具参数拉取/过滤日志。 | + +--- + +## 4. 输出与纪律(继承 13 号并加强) + +- **最终回复**:仍以 **一行合法 JSON** 结尾(含 `intent`、`reply`、`user_profile` 等),无 Markdown 代码围栏。 +- **用户画像**:末行 JSON 中的 **`user_profile` 须如实填写**;用户已告知昵称时需含 **`name`**,避免空对象覆盖会话记忆(与 13 号约定一致)。 +- **file_write**:同一轮用户请求中避免无故多次写入;勿以本地文件写入替代 JSON 中的会话画像。 +- **DSML / invoke**:勿在正文中重复刷屏工具标签行;说明工具结果用自然语言,再以单行 JSON 收尾。 +- **database_query**:禁止非 SELECT;不得编造查询结果。 +- **adb_log**:仅在用户明确需要设备日志时使用;无环境或失败时如实转述工具错误信息。 + +--- + +## 5. 使用与维护 + +| 项目 | 说明 | +|------|------| +| 创建/更新脚本 | `backend/scripts/create_zhini_kefu_14.py` | +| 默认源/目标 Agent | 源:`知你客服13号`;目标:`知你客服14号`(可用环境变量覆盖) | +| 执行 API | 需携带稳定 **`user_id`**(若工作流 Cache 键含 `{{user_id}}`),以免记忆串用户 | +| 进程 | 工作流在 **Celery** 中执行;更新工具或引擎后需 **重启 API + Celery** | + +--- + +## 6. 
能力与边界 + +- **database_query** 访问的是平台配置的数据库或已登记数据源,表结构与安全策略以部署环境为准。 +- **adb_log** 强依赖宿主机是否安装 adb、USB/网络调试是否连通,生产环境需单独评估权限与审计。 +- **text_analyze** 的摘要/关键词为轻量规则实现,复杂语义仍以 LLM 为主。 + +--- + +*文档内容随脚本与平台实现变更;以仓库内 `create_zhini_kefu_14.py` 及实际 Agent 配置为准。* diff --git a/知你客服能力的集成和扩展方案.md b/知你客服能力的集成和扩展方案.md new file mode 100644 index 0000000..ff612c0 --- /dev/null +++ b/知你客服能力的集成和扩展方案.md @@ -0,0 +1,156 @@ +# 知你客服能力的集成和扩展方案 + +本文面向**业务/App 对接**与**平台内二次开发**,说明如何将「知你客服」系列 Agent 接入自有系统,以及如何安全、可维护地扩展工具与版本。具体某版本能力见 `知你客服14号能力文档.md`,记忆机制见 `agent记忆实现方案.md`,App 侧 HTTP 细节可参考 `知你客服Agent智能聊天App接入方案.md`。 + +--- + +## 一、集成目标与边界 + +| 目标 | 说明 | +|------|------| +| **对话能力** | 通过平台 **执行 API** 创建异步任务,由 **Celery** 跑完工作流后返回 `output_data`。 | +| **多轮记忆** | 依赖工作流 **Cache** 键(常见 `user_memory_{user_id}`);集成侧必须传**稳定 `user_id`**,否则记忆串号或落在 `default`。 | +| **能力边界** | 具体能调用的工具、提示词约束以**当前 Agent 工作流**为准(如 14 号含全量内置工具);文件、DB、adb 等受**部署环境与配置**限制。 | + +--- + +## 二、集成方式选型 + +| 方式 | 适用 | 要点 | +|------|------|------| +| **后端代理(推荐)** | 移动 App、多端、需隐藏平台账号 | 服务端用平台账号换 `access_token`,代用户调 `POST /api/v1/executions`,再轮询状态/详情。 | +| **前端直连** | Web、用户已在平台登录 | 浏览器携带 Bearer Token 调同一套 API;需处理 CORS 与 Token 刷新。 | +| **内嵌平台对话页** | 快速验证 | iframe/WebView 打开平台「使用」页,不直接对接执行 API。 | + +仓库内 **SAARS** 等示例提供「平台登录 + 转发执行」的代理思路,可与上述方式一对照实现。 + +--- + +## 三、执行 API 集成要点 + +### 3.1 认证 + +- `POST /api/v1/auth/login`(`application/x-www-form-urlencoded`)获取 `access_token`。 +- 后续请求头:`Authorization: Bearer `。 + +### 3.2 创建执行 + +- `POST /api/v1/executions` +- 典型 body: + +```json +{ + "agent_id": "<知你客服某版本的 Agent UUID>", + "input_data": { + "query": "用户本轮输入", + "USER_INPUT": "用户本轮输入", + "user_id": "业务侧稳定用户标识" + } +} +``` + +- **`user_id`**:强烈建议传入且长期不变,与 Cache 键一致方可多轮隔离。 +- 字段名可与工作流 Start 节点约定对齐(常见 `query` / `USER_INPUT`)。 + +### 3.3 获取结果 + +1. `GET /api/v1/executions/{id}/status` 直至 `completed` / `failed`。 +2. 
`GET /api/v1/executions/{id}` 读取 **`output_data`**。 + +### 3.4 展示层处理(避免重复 JSON) + +知你类工作流常约定:**自然语言 + 最后一行单行 JSON**(`intent` / `reply` / `user_profile`)。 +前端若直接展示整段字符串,用户会看到「正文 + JSON」重复感。可参考平台 **`AgentChatPreview`** 中的做法:展示前去掉末行结构化 JSON,或仅展示 JSON 中的 `reply`(以产品为准)。 + +### 3.5 运维 + +- 工作流在 **Celery Worker** 中执行;升级**引擎或工具**后需 **重启 API + Celery**(可用 `backend/scripts/restart_api_worker.ps1`)。 +- 轮询间隔建议约 **0.5~1 s**,并设总超时。 + +--- + +## 四、记忆与持久化(集成侧责任) + +- **键隔离**:必须传 **`user_id`**;详见 `agent记忆实现方案.md`。 +- **Redis + 可选 MySQL**:热数据与持久化关系、合并规则、条数截断(如默认 `max_history_length`)见该文档。 +- **业务无关数据**:勿把敏感密钥写入可被 `file_write` 触及的公开路径;生产环境应配置 **`LOCAL_FILE_TOOLS_ROOT`** 等。 + +--- + +## 五、扩展路径总览 + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 扩展维度 │ 主要改动位置 │ +├─────────────────────────────────────────────────────────────┤ +│ 新内置工具 │ builtin_tools 实现 + tools_bootstrap 注册 │ +│ 新版本 Agent │ scripts/create_zhini_kefu_XX.py 复制/补丁 │ +│ 工作流结构 │ 平台工作流编辑器 + 导出/入库 API │ +│ 提示词与工具列表 │ llm-unified 节点 data.prompt / tools │ +│ HTTP/外部工具 │ 平台「工具管理」中配置 + tool_registry │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## 六、扩展内置工具(后端) + +1. **实现**:在 `backend/app/services/builtin_tools.py` 中实现异步函数,并定义 **JSON Schema**(与 OpenAI function calling 兼容)。 +2. **注册**:在 `backend/app/core/tools_bootstrap.py` 的 `ensure_builtin_tools_registered` 中 **`register_builtin_tool(name, func, schema)`**。 +3. **生效**:确保 **API 进程与 Celery Worker** 均会 import 执行流(已含 bootstrap);部署后**重启**两处进程。 +4. 
**工作流**:在目标 Agent 的 **LLM 节点** `data.tools` / `selected_tools` 中加入新工具名,`enable_tools: true`。 + +**注意**:工具名、参数 schema 与模型实际调用必须一致;敏感能力(如 shell)不建议放入内置工具。 + +--- + +## 七、扩展「知你客服」版本(脚本化) + +- 仓库已提供 **`create_zhini_kefu_12.py` … `create_zhini_kefu_14.py`** 等脚本:登录平台 → 复制 Agent → 调整边与布局 → 更新 `llm-unified` 提示词与工具列表 → `PUT` 写回。 +- **推荐做法**:以**上一稳定版本**为源复制,只改差异(工具数组、追加提示段落、描述),便于回溯。 +- **文档**:为新版本补充 **`知你客服XX号能力文档.md`**,避免运营与开发认知不一致。 + +--- + +## 八、扩展工作流结构(低代码) + +- 在平台 **工作流设计器** 中增删节点:Cache、LLM、条件分支、向量检索、HTTP 等。 +- **记忆链路**勿随意断开:保证 **读 Cache → LLM → 写 Cache** 顺序仍可达,且 `user_memory_*` 键模板未破坏。 +- **自动布局**仅影响坐标,不改变执行语义;复杂图建议配合引擎的占位符与合并逻辑测试多轮对话。 + +--- + +## 九、安全与合规 + +| 项 | 建议 | +|----|------| +| **database_query** | 引擎侧限制 **SELECT**;生产应限制可见表/行或仅用只读账号;勿在提示词中鼓励随意扫库。 | +| **adb_log** | 仅在内网或受控环境开启;需本机 adb 与设备授权。 | +| **file_read / file_write** | 严格 **`LOCAL_FILE_TOOLS_ROOT`**;控制写入大小上限。 | +| **http_request** | 注意 SSRF 与出站策略(若平台侧有网关/白名单应一并配置)。 | +| **Token** | App 代理勿把平台账号密码下发客户端;`access_token` 缓存于服务端并处理过期。 | + +--- + +## 十、测试建议 + +- **单轮**:工具是否被调用、返回是否进入 `reply` 或末行 JSON。 +- **多轮**:同一 `user_id` 连续两轮,验证画像与历史是否写入 Cache/DB。 +- **回归**:升级引擎或工具后跑一遍「问名—再问名」与一次需要工具调用的用例。 + +--- + +## 十一、相关文档与代码索引 + +| 说明 | 路径 | +|------|------| +| 14 号能力与工具表 | `知你客服14号能力文档.md` | +| 记忆与 Redis/DB | `agent记忆实现方案.md` | +| App HTTP 接入示例 | `知你客服Agent智能聊天App接入方案.md` | +| 创建/更新 14 号脚本 | `backend/scripts/create_zhini_kefu_14.py` | +| 内置工具注册 | `backend/app/core/tools_bootstrap.py` | +| 引擎与 Cache | `backend/app/services/workflow_engine.py` | + +--- + +*随平台版本与仓库迭代,以实际接口与工作流配置为准。* diff --git a/静夜思.md b/静夜思.md new file mode 100644 index 0000000..a9bf2b8 --- /dev/null +++ b/静夜思.md @@ -0,0 +1,4 @@ +# 静夜思 + +床前明月光,疑是地上霜。 +举头望明月,低头思故乡。 diff --git a/项目核心文档汇总.md b/项目核心文档汇总.md new file mode 100644 index 0000000..f29f3c5 --- /dev/null +++ b/项目核心文档汇总.md @@ -0,0 +1,462 @@ +# 低代码智能体平台 - 核心文档汇总 + +## 项目概述 + +### 项目背景 +低代码智能体平台旨在让非技术用户通过可视化拖拽的方式,快速构建和部署AI智能体,降低AI应用开发门槛,提高开发效率。 + +### 核心价值 +- **零代码/低代码**:通过可视化界面配置智能体,无需编写代码 +- 
**快速部署**:一键部署到多种环境(云服务、本地、边缘设备) +- **灵活扩展**:支持自定义组件和插件机制 +- **多模型支持**:集成主流AI模型(OpenAI、Claude、DeepSeek等) +- **工作流编排**:支持复杂的工作流设计和执行 +- **Agent协作**:支持多Agent协作和工具链管理 + +### 目标用户 +- 产品经理和业务人员 +- 初级开发者 +- 企业数字化转型团队 +- AI应用开发者 + +## 系统架构设计 + +### 整体架构 +采用前后端分离的微服务架构: + +``` +┌─────────────────────────────────────────────────────────┐ +│ 前端层 (Frontend) │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ 可视化编辑器 │ │ 智能体管理 │ │ 监控面板 │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ │ +└─────────────────────────────────────────────────────────┘ + │ + │ HTTP/WebSocket + ▼ +┌─────────────────────────────────────────────────────────┐ +│ API网关层 (Gateway) │ +│ 认证、限流、路由、负载均衡 │ +└─────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────┐ +│ 业务服务层 (Services) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐│ +│ │智能体引擎 │ │工作流引擎 │ │模型管理 │ │数据管理 ││ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘│ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐│ +│ │任务调度 │ │日志监控 │ │用户管理 │ │权限管理 ││ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘│ +└─────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────┐ +│ 数据存储层 (Storage) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐│ +│ │ MySQL │ │ │ │ Redis │ │ ││ +│ │(元数据) │ │ │ │(缓存) │ │ ││ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘│ +└─────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────┐ +│ 外部服务层 (External) │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐│ +│ │ OpenAI │ │ DeepSeek │ │ 本地模型 │ │ 其他API ││ +│ └──────────┘ └──────────┘ └──────────┘ └──────────┘│ +└─────────────────────────────────────────────────────────┘ +``` + +### 技术栈选型 + +#### 前端技术栈 +- **框架**: Vue 3 + TypeScript + Vite +- **状态管理**: Pinia +- **UI组件库**: Element Plus +- **工作流可视化**: Vue Flow +- **HTTP客户端**: Axios +- 
**WebSocket**: 原生WebSocket API + +#### 后端技术栈 +- **框架**: Python FastAPI +- **数据库**: MySQL(腾讯云数据库) +- **缓存/消息队列**: Redis +- **异步任务**: Celery +- **AI框架**: LangChain +- **数据库ORM**: SQLAlchemy +- **迁移工具**: Alembic +- **认证**: JWT + +## 核心功能模块 + +### 1. 用户认证系统 +- 用户注册、登录 +- JWT Token认证 +- 密码加密(bcrypt) +- 获取当前用户信息 + +### 2. 工作流管理 +- 工作流CRUD(创建、读取、更新、删除) +- 可视化编辑器(拖拽节点、连线) +- 节点配置面板 +- 工作流版本管理 +- 工作流导入导出 +- 工作流模板市场 + +### 3. 工作流执行引擎 +- DAG构建和拓扑排序 +- 节点执行器(支持多种节点类型) +- 数据流管理(节点间数据传递) +- Celery异步任务集成 +- 执行状态管理 +- 错误处理和重试机制 + +### 4. 节点类型支持 +- **基础节点**: 开始、输入、输出、结束 +- **AI节点**: LLM(支持OpenAI、DeepSeek等) +- **逻辑节点**: 条件、循环 +- **数据节点**: 转换、数据库查询、文件操作 +- **集成节点**: HTTP请求、Webhook、邮件、消息队列 +- **工具节点**: 定时任务、Agent节点 + +### 5. 内置工具调用 +平台提供8个内置工具,可在LLM节点中启用: +1. 🌐 **http_request** - HTTP请求工具 +2. 📖 **file_read** - 文件读取工具 +3. ✍️ **file_write** - 文件写入工具 +4. 📊 **text_analyze** - 文本分析工具 +5. 🕐 **datetime** - 日期时间工具 +6. 🔢 **math_calculate** - 数学计算工具 +7. 💻 **system_info** - 系统信息工具 +8. 📦 **json_process** - JSON处理工具 + +### 6. 执行管理 +- 创建执行任务 +- 获取执行记录列表 +- 获取执行详情 +- 执行状态实时推送(WebSocket) +- 执行结果可视化 +- 执行日志和监控 + +### 7. 数据源管理 +- 数据源模型和CRUD API +- 数据源连接测试 +- 数据查询功能 + +### 8. 模型配置管理 +- 模型配置CRUD API +- 多模型支持(OpenAI、DeepSeek等) +- 模型切换和配置 + +### 9. 
Agent管理 +- Agent CRUD API +- Agent管理页面 +- Agent协作功能 + +## 项目结构 + +``` +aiagent/ +├── frontend/ # 前端项目(Vue 3 + TypeScript) +│ ├── src/ +│ │ ├── assets/ # 静态资源 +│ │ ├── components/ # 组件 +│ │ ├── router/ # 路由 +│ │ ├── stores/ # 状态管理(Pinia) +│ │ ├── views/ # 页面 +│ │ └── App.vue # 根组件 +│ ├── package.json # 依赖配置 +│ ├── vite.config.ts # Vite配置 +│ └── Dockerfile.dev # 开发环境Dockerfile +├── backend/ # 后端项目(Python FastAPI) +│ ├── app/ +│ │ ├── api/ # API路由 +│ │ ├── core/ # 核心模块 +│ │ ├── models/ # 数据库模型 +│ │ ├── schemas/ # Pydantic模式 +│ │ ├── services/ # 业务逻辑 +│ │ └── main.py # 应用入口 +│ ├── alembic/ # 数据库迁移 +│ ├── tests/ # 测试 +│ ├── requirements.txt # Python依赖 +│ ├── Dockerfile.dev # 开发环境Dockerfile +│ └── alembic.ini # Alembic配置 +├── docker-compose.dev.yml # 开发环境Docker Compose配置 +├── README.md # 项目说明 +├── QUICKSTART.md # 快速启动指南 +├── 使用指南.md # 用户使用指南 +├── 方案-优化版.md # 完整技术方案 +├── 开发进度.md # 开发进度跟踪 +├── 内置工具列表.md # 内置工具文档 +└── 项目核心文档汇总.md # 本文档 +``` + +## 部署指南 + +### 前置要求 +- Node.js 18+ 和 pnpm +- Python 3.11+ +- Docker 和 Docker Compose +- MySQL(使用腾讯云数据库) +- Redis 7+(或使用Docker) + +### 使用 Docker Compose 启动(推荐) +```bash +# 启动所有服务 +docker-compose -f docker-compose.dev.yml up -d + +# 查看服务状态 +docker-compose ps + +# 查看日志 +docker-compose logs -f + +# 停止服务 +docker-compose down +``` + +### 本地开发部署 + +#### 后端服务 +```bash +cd backend + +# 创建虚拟环境 +python -m venv venv +source venv/bin/activate # Windows: venv\Scripts\activate + +# 安装依赖 +pip install -r requirements.txt + +# 配置环境变量 +cp env.example .env +# 编辑 .env 文件(数据库已配置为腾讯云MySQL) + +# 运行数据库迁移 +alembic upgrade head + +# 启动开发服务器 +uvicorn app.main:app --reload + +# 启动 Celery Worker(新终端) +celery -A app.core.celery_app worker --loglevel=info +``` + +#### 前端服务 +```bash +cd frontend + +# 安装依赖 +pnpm install + +# 启动开发服务器 +pnpm dev +``` + +### 服务访问地址 +- **前端**: http://localhost:8038 +- **后端API**: http://localhost:8037 +- **API文档**: http://localhost:8037/docs +- **Redis**: localhost:6379 + +## API 文档摘要 + +### 认证API +- `POST /api/v1/auth/register` - 用户注册 +- 
`POST /api/v1/auth/login` - 用户登录(获取JWT Token) +- `GET /api/v1/auth/me` - 获取当前用户信息 + +### 工作流API +- `GET /api/v1/workflows` - 获取工作流列表 +- `POST /api/v1/workflows` - 创建工作流 +- `GET /api/v1/workflows/{id}` - 获取工作流详情 +- `PUT /api/v1/workflows/{id}` - 更新工作流 +- `DELETE /api/v1/workflows/{id}` - 删除工作流 +- `POST /api/v1/workflows/{id}/execute` - 执行工作流 + +### 执行管理API +- `GET /api/v1/executions` - 获取执行记录列表 +- `GET /api/v1/executions/{id}` - 获取执行详情 +- `GET /api/v1/executions/{id}/status` - 获取执行状态 + +### 数据源API +- `GET /api/v1/data-sources` - 获取数据源列表 +- `POST /api/v1/data-sources` - 创建数据源 +- `POST /api/v1/data-sources/{id}/test` - 测试数据源连接 +- `POST /api/v1/data-sources/{id}/query` - 执行数据查询 + +### WebSocket API +- `ws://localhost:8037/ws/execution/{execution_id}` - 执行状态实时推送 + +完整API文档请访问:http://localhost:8037/docs + +## 用户指南摘要 + +### 快速开始使用 +1. **登录系统**:访问 http://localhost:8038,注册并登录 +2. **创建工作流**:点击"创建工作流"按钮进入可视化编辑器 +3. **拖拽节点**:从左侧工具箱拖拽节点到画布 +4. **连接节点**:点击节点的连接点并拖拽到目标节点 +5. **配置节点**:点击节点,在右侧配置面板设置参数 +6. **保存工作流**:点击工具栏的"保存"按钮 +7. **执行工作流**:点击工具栏的"运行"按钮 + +### 节点类型说明 +- **开始节点**:工作流起始节点 +- **输入节点**:数据输入节点,可配置输入参数 +- **LLM节点**:AI模型处理节点,支持OpenAI、DeepSeek等模型 +- **条件节点**:条件判断节点,支持表达式配置 +- **转换节点**:数据转换节点,支持JSONPath、模板等 +- **输出节点**:数据输出节点,可配置输出格式 +- **结束节点**:工作流结束节点 + +### 工具调用功能 +在LLM节点中启用工具调用,AI可以调用内置工具处理任务: +1. 选择LLM节点,打开"工具"标签页 +2. 启用"启用工具调用"开关 +3. 选择需要的工具(可多选) +4. 保存配置 + +## 开发指南 + +### 开发规范 +- **前端代码规范**:ESLint + Prettier +- **后端代码规范**:PEP 8 + Black +- **Git提交规范**:Conventional Commits +- **代码审查**:必须通过Code Review + +### 测试指南 +```bash +# 前端测试 +cd frontend +pnpm test + +# 后端测试 +cd backend +pytest +``` + +### 数据库迁移 +```bash +cd backend + +# 创建新的迁移 +alembic revision --autogenerate -m "描述" + +# 应用迁移 +alembic upgrade head + +# 回退迁移 +alembic downgrade -1 +``` + +### 添加新节点类型 +1. 后端:在 `backend/app/core/nodes/` 中添加节点执行器 +2. 后端:在 `backend/app/core/workflow_engine.py` 中注册节点类型 +3. 前端:在 `frontend/src/components/nodes/` 中添加节点组件 +4. 
前端:在 `frontend/src/stores/workflowStore.ts` 中注册节点类型 + +## 测试指南 + +### 单元测试 +- 使用 pytest 框架 +- 测试核心功能和工作流引擎 +- 测试API端点 + +### 集成测试 +- 测试数据库操作 +- 测试外部API集成 +- 测试工作流执行 + +### 端到端测试 +- 测试用户界面交互 +- 测试完整工作流执行 +- 测试跨浏览器兼容性 + +## 内置工具列表 + +平台提供8个内置工具,详细功能如下: + +| 工具名称 | 功能描述 | 主要参数 | +|---------|---------|---------| +| http_request | 发送HTTP请求 | url, method, headers, body | +| file_read | 读取文件内容 | file_path | +| file_write | 写入文件内容 | file_path, content, mode | +| text_analyze | 分析文本内容 | text, operation | +| datetime | 处理日期时间 | operation, format | +| math_calculate | 执行数学计算 | expression | +| system_info | 获取系统信息 | 无 | +| json_process | 处理JSON数据 | json_string, operation | + +详细使用示例请参考:[内置工具列表.md](./内置工具列表.md) + +## 开发进度和路线图 + +### 当前完成度 +- **第一阶段MVP**: 100% ✅ +- **第二阶段核心功能**: 100% ✅ +- **第三阶段核心功能**: 100% ✅ +- **第四-七阶段功能**: 100% ✅ +- **整体项目**: 约 85-90% + +### 已完成核心功能 +1. **完整的用户认证系统** - 注册、登录、JWT认证 +2. **工作流CRUD** - 创建、读取、更新、删除工作流 +3. **工作流执行引擎** - DAG构建、拓扑排序、节点执行 +4. **可视化编辑器** - 拖拽节点、连线、配置面板 +5. **异步任务处理** - Celery集成,支持长时间运行的任务 +6. **多模型支持** - OpenAI、DeepSeek集成 +7. **内置工具调用** - 8个内置工具支持 +8. **实时状态推送** - WebSocket实时推送执行状态 + +### 近期开发重点(高优先级) +1. **监控和告警前端界面** - 系统监控面板、告警规则管理 +2. **用户体验优化** - 工作流编辑器优化、Agent使用体验优化 +3. **生产环境部署配置** - Docker/K8s配置、监控和日志集成 + +### 长期规划 +1. **多租户支持** - 租户模型、数据隔离、资源配额管理 +2. **插件系统** - 插件注册机制、自定义节点插件开发框架 +3. **性能优化** - 工作流执行性能优化、前端性能优化 + +详细开发进度请参考:[开发进度.md](./开发进度.md) + +## 常见问题 + +### 1. 数据库连接失败 +- 检查MySQL数据库是否可访问(腾讯云数据库) +- 检查数据库连接信息是否正确(.env 文件) +- 检查网络连接是否正常 + +### 2. Redis 连接失败 +- 检查Redis是否正在运行 +- 检查Redis URL是否正确 + +### 3. 前端无法连接后端 +- 检查后端服务是否正在运行 +- 检查前端配置的API URL是否正确(vite.config.ts) +- 检查CORS配置是否正确 + +### 4. Celery 任务不执行 +- 检查Celery Worker是否正在运行 +- 检查Redis连接是否正常 +- 检查任务是否正确注册 + +### 5. 
工作流执行失败 +- 检查节点配置是否正确 +- 检查LLM API密钥是否有效 +- 查看执行日志获取详细错误信息 + +## 联系和支持 + +- **API文档**:http://localhost:8037/docs +- **前端服务**:http://localhost:8038 +- **问题反馈**:查看项目文档或联系开发团队 + +--- + +**最后更新**: 2026-04-06 +**文档版本**: 1.0 + +*本文档基于项目现有文档整理生成,涵盖项目核心信息。详细技术方案请参考[方案-优化版.md](./方案-优化版.md)。* \ No newline at end of file