feat: DeepSeek v4 模型对齐、作业助手脚本与 Agent 对比测试

- 前端 WorkflowEditor/ModelConfigs/NodeTemplates:deepseek-v4-flash、v4-pro,弃用提示
- llm_service 默认 deepseek-v4-flash;workflow_engine 等与模型配置注入
- 作业管理脚本支持 AGENT_NAME 与 v4-pro;新增 compare_homework_agents 脚本
- 文档重命名为 (红头)项目核心文档汇总.md 并更新 DeepSeek 说明

Made-with: Cursor
This commit is contained in:
renjianbo
2026-04-30 00:57:13 +08:00
parent cadeb2dc32
commit 4366312946
12 changed files with 488 additions and 55 deletions

View File

@@ -645,6 +645,25 @@
<el-option label="DeepSeek" value="deepseek" />
</el-select>
</el-form-item>
<el-form-item label="已配置模型">
<el-select
v-model="selectedNode.data.model_config_id"
placeholder="从模型配置中选择(可选)"
clearable
filterable
@change="handleConfiguredModelChange"
>
<el-option
v-for="cfg in availableConfiguredModels"
:key="cfg.id"
:label="`${cfg.name} (${cfg.provider}/${cfg.model_name})`"
:value="cfg.id"
/>
</el-select>
<div style="margin-top: 5px; color: #909399; font-size: 12px;">
选择后会回填提供商与模型保存工作流后运行时将通过 model_config_id 使用你在模型配置中保存的密钥与 API 地址需与当前 Agent/工作流属主一致
</div>
</el-form-item>
<el-form-item label="提示词">
<div class="prompt-input-wrapper" style="position: relative;">
<el-input
@@ -764,8 +783,11 @@
<el-option label="GPT-4 Turbo" value="gpt-4-turbo-preview" />
</template>
<template v-else-if="selectedNode.data.provider === 'deepseek'">
<el-option label="DeepSeek Chat" value="deepseek-chat" />
<el-option label="DeepSeek V4 Flash推荐" value="deepseek-v4-flash" />
<el-option label="DeepSeek V4 Pro推荐" value="deepseek-v4-pro" />
<el-option label="DeepSeek Coder" value="deepseek-coder" />
<el-option label="DeepSeek Chat兼容计划弃用 2026/07/24" value="deepseek-chat" />
<el-option label="DeepSeek Reasoner兼容计划弃用 2026/07/24" value="deepseek-reasoner" />
</template>
</el-select>
</el-form-item>
@@ -827,6 +849,25 @@
<el-option label="DeepSeek" value="deepseek" />
</el-select>
</el-form-item>
<el-form-item label="已配置模型">
<el-select
v-model="selectedNode.data.model_config_id"
placeholder="从模型配置中选择(可选)"
clearable
filterable
@change="handleConfiguredModelChange"
>
<el-option
v-for="cfg in availableConfiguredModels"
:key="cfg.id"
:label="`${cfg.name} (${cfg.provider}/${cfg.model_name})`"
:value="cfg.id"
/>
</el-select>
<div style="margin-top: 5px; color: #909399; font-size: 12px;">
选择后会回填提供商与模型保存工作流后运行时将通过 model_config_id 使用你在模型配置中保存的密钥与 API 地址需与当前 Agent/工作流属主一致
</div>
</el-form-item>
<el-form-item label="提示词">
<el-input
v-model="selectedNode.data.prompt"
@@ -846,8 +887,11 @@
<el-option label="GPT-4 Turbo" value="gpt-4-turbo-preview" />
</template>
<template v-else-if="selectedNode.data.provider === 'deepseek'">
<el-option label="DeepSeek Chat" value="deepseek-chat" />
<el-option label="DeepSeek V4 Flash推荐" value="deepseek-v4-flash" />
<el-option label="DeepSeek V4 Pro推荐" value="deepseek-v4-pro" />
<el-option label="DeepSeek Coder" value="deepseek-coder" />
<el-option label="DeepSeek Chat兼容计划弃用 2026/07/24" value="deepseek-chat" />
<el-option label="DeepSeek Reasoner兼容计划弃用 2026/07/24" value="deepseek-reasoner" />
</template>
</el-select>
</el-form-item>
@@ -2910,6 +2954,14 @@ import { StartNode, LLMNode, ConditionNode, EndNode, DefaultNode } from './NodeT
import { useCollaboration } from '@/composables/useCollaboration'
import NodeExecutionDetail from './NodeExecutionDetail.vue'
// One saved model configuration as returned by GET /api/v1/model-configs.
// LLM/template nodes can reference one of these (via model_config_id) so the
// runtime resolves the user's stored API key / endpoint instead of node-local values.
interface ConfiguredModelItem {
  id: string          // configuration id stored on the node as model_config_id
  name: string        // display name shown in the "已配置模型" dropdown
  provider: string    // e.g. 'deepseek' — matched against the node's provider field
  model_name: string  // concrete model identifier, copied into node.data.model on select
  base_url?: string   // optional custom API endpoint — presumably used server-side; not read here
}
const props = defineProps<{
workflowId?: string
agentId?: string
@@ -2986,6 +3038,50 @@ const hasUpstreamNodes = computed(() => {
// Node-template state: the available node templates and a loading flag for the fetch.
const nodeTemplates = ref<any[]>([])
const loadingTemplates = ref(false)
// Every model configuration loaded for the current user (see loadConfiguredModels).
const configuredModels = ref<ConfiguredModelItem[]>([])

// Configurations offered in the node's "已配置模型" dropdown: restricted to the
// selected node's provider when one is set, otherwise the full list.
const availableConfiguredModels = computed(() => {
  const provider = selectedNode.value?.data?.provider
  return provider
    ? configuredModels.value.filter((cfg) => cfg.provider === provider)
    : configuredModels.value
})
// Fetch the user's saved model configurations for the LLM-node quick-select.
// Best-effort: any failure (or a non-array payload) leaves the list empty so the
// editor keeps working without configured models.
const loadConfiguredModels = async () => {
  try {
    const { data } = await api.get('/api/v1/model-configs', {
      params: { limit: 100 }
    })
    configuredModels.value = Array.isArray(data) ? data : []
  } catch (err) {
    console.warn('[WorkflowEditor] 加载模型配置失败:', err)
    configuredModels.value = []
  }
}
// When a configured model is picked, back-fill the node's provider and model
// from that configuration. Clearing the select (empty configId) changes nothing:
// v-model already wiped model_config_id, and provider/model are left as-is.
const handleConfiguredModelChange = (configId: string) => {
  const node = selectedNode.value
  if (!node || !configId) return
  const matched = configuredModels.value.find((cfg) => cfg.id === configId)
  if (!matched) return
  node.data.provider = matched.provider
  node.data.model = matched.model_name
}
// Keep model_config_id consistent with the node's provider: if the user manually
// switches provider on an llm/template node while a configured model from a
// different provider is still linked, drop the stale link.
watch(
  () => selectedNode.value?.data?.provider,
  (provider) => {
    const node = selectedNode.value
    if (!node) return
    const isModelNode = node.type === 'llm' || node.type === 'template'
    if (!isModelNode || !provider) return
    const configId = node.data?.model_config_id as string | undefined
    if (!configId) return
    const linked = configuredModels.value.find((cfg) => cfg.id === configId)
    if (linked && linked.provider !== provider) {
      node.data.model_config_id = ''
    }
  }
)
// 加载节点模板列表
const loadNodeTemplates = async () => {
@@ -4538,7 +4634,7 @@ const getScenarios = (nodeType: string) => {
],
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请总结以下内容100字以内{{input}}',
temperature: 0.5,
max_tokens: 500
@@ -4558,7 +4654,7 @@ const getScenarios = (nodeType: string) => {
],
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请把下列内容翻译成{{target_lang}}{{input}}',
temperature: 0.3,
max_tokens: 1000
@@ -4571,7 +4667,7 @@ const getScenarios = (nodeType: string) => {
icon: 'DataAnalysis',
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请从以下文本中提取关键信息以JSON格式返回{{input}}',
temperature: 0.2,
max_tokens: 1000
@@ -4584,7 +4680,7 @@ const getScenarios = (nodeType: string) => {
icon: 'Sort',
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请对以下文本进行分类:{{input}}',
temperature: 0.1,
max_tokens: 200
@@ -4701,7 +4797,7 @@ const configTemplates = ref<Array<{
nodeType: 'llm',
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请总结以下内容100字以内{{input}}',
temperature: 0.5,
max_tokens: 500
@@ -4715,7 +4811,7 @@ const configTemplates = ref<Array<{
nodeType: 'llm',
config: {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请把下列内容翻译成英文:{{input}}',
temperature: 0.3,
max_tokens: 1000
@@ -5138,7 +5234,7 @@ const applyTemplate = () => {
if (t === 'llm') {
if (templateSelection.value === 'llm_summary') {
selectedNode.value.data.provider = selectedNode.value.data.provider || 'deepseek'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-chat'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-v4-flash'
selectedNode.value.data.prompt = '请总结以下内容100字以内{text}'
selectedNode.value.data.temperature = 0.5
} else if (templateSelection.value === 'llm_translate') {
@@ -5218,12 +5314,12 @@ const applyTemplate = () => {
if (t === 'llm') {
if (templateSelection.value === 'llm_extract') {
selectedNode.value.data.provider = selectedNode.value.data.provider || 'deepseek'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-chat'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-v4-flash'
selectedNode.value.data.prompt = '请从以下文本中提取关键信息JSON格式{text}'
selectedNode.value.data.temperature = 0.3
} else if (templateSelection.value === 'llm_classify') {
selectedNode.value.data.provider = selectedNode.value.data.provider || 'deepseek'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-chat'
selectedNode.value.data.model = selectedNode.value.data.model || 'deepseek-v4-flash'
selectedNode.value.data.prompt = '请将以下内容分类为:正面/中性/负面。内容:{text}'
selectedNode.value.data.temperature = 0.2
}
@@ -5431,7 +5527,7 @@ const handleDrop = (event: DragEvent) => {
// LLM节点默认配置
...(isLLMNode && !isTemplateNode ? {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '请处理用户请求。',
temperature: 0.5,
max_tokens: 1500,
@@ -5441,7 +5537,7 @@ const handleDrop = (event: DragEvent) => {
// 模板节点默认配置
...(isTemplateNode ? {
provider: 'deepseek',
model: 'deepseek-chat',
model: 'deepseek-v4-flash',
prompt: '',
temperature: 0.7,
max_tokens: 1500,
@@ -7757,6 +7853,8 @@ onMounted(async () => {
// 加载节点模板列表
loadNodeTemplates()
// 加载模型配置列表用于LLM节点快速选择已配置模型
loadConfiguredModels()
// 加载测试用例
loadTestCases()
// 加载配置模板