feat: knowledge pipeline (#25360)
Signed-off-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: twwu <twwu@dify.ai>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: jyong <718720800@qq.com>
Co-authored-by: Wu Tianwei <30284043+WTW0313@users.noreply.github.com>
Co-authored-by: QuantumGhost <obelisk.reg+git@gmail.com>
Co-authored-by: lyzno1 <yuanyouhuilyz@gmail.com>
Co-authored-by: quicksand <quicksandzn@gmail.com>
Co-authored-by: Jyong <76649700+JohnJyong@users.noreply.github.com>
Co-authored-by: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Co-authored-by: zxhlyh <jasonapring2015@outlook.com>
Co-authored-by: Yongtao Huang <yongtaoh2022@gmail.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: nite-knite <nkCoding@gmail.com>
Co-authored-by: Hanqing Zhao <sherry9277@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Harry <xh001x@hotmail.com>
@@ -2,26 +2,28 @@ import json
 import logging
 from collections.abc import Mapping, Sequence
 from datetime import datetime
-from enum import StrEnum, auto
-from typing import TYPE_CHECKING, Any, Union, cast
+from enum import StrEnum
+from typing import TYPE_CHECKING, Any, Optional, Union, cast
 from uuid import uuid4

 import sqlalchemy as sa
-from sqlalchemy import DateTime, exists, orm, select
+from sqlalchemy import DateTime, Select, exists, orm, select

 from core.file.constants import maybe_file_object
 from core.file.models import File
 from core.variables import utils as variable_utils
 from core.variables.variables import FloatVariable, IntegerVariable, StringVariable
 from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
-from core.workflow.nodes.enums import NodeType
+from core.workflow.enums import NodeType
+from extensions.ext_storage import Storage
 from factories.variable_factory import TypeMismatchError, build_segment_with_type
 from libs.datetime_utils import naive_utc_now
+from libs.uuid_utils import uuidv7

 from ._workflow_exc import NodeNotFoundError, WorkflowDataError

 if TYPE_CHECKING:
-    from models.model import AppMode
+    from models.model import AppMode, UploadFile

 from sqlalchemy import Index, PrimaryKeyConstraint, String, UniqueConstraint, func
 from sqlalchemy.orm import Mapped, declared_attr, mapped_column
@@ -35,7 +37,7 @@ from libs import helper
 from .account import Account
 from .base import Base
 from .engine import db
-from .enums import CreatorUserRole, DraftVariableType
+from .enums import CreatorUserRole, DraftVariableType, ExecutionOffLoadType
 from .types import EnumText, StringUUID

 logger = logging.getLogger(__name__)
@@ -46,8 +48,9 @@ class WorkflowType(StrEnum):
     Workflow Type Enum
     """

-    WORKFLOW = auto()
-    CHAT = auto()
+    WORKFLOW = "workflow"
+    CHAT = "chat"
+    RAG_PIPELINE = "rag-pipeline"

     @classmethod
     def value_of(cls, value: str) -> "WorkflowType":
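Context on the enum change: with `StrEnum`, `auto()` derives each member's value from the lowercased member name, so it could never produce the hyphenated "rag-pipeline" value the new member needs; hence the switch to explicit strings. A minimal sketch (illustrative, not part of the commit):

    from enum import StrEnum, auto

    class AutoValued(StrEnum):
        RAG_PIPELINE = auto()

    # auto() lowercases the member name, keeping the underscore:
    assert AutoValued.RAG_PIPELINE.value == "rag_pipeline"  # not "rag-pipeline"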
@@ -143,6 +146,9 @@ class Workflow(Base):
     _conversation_variables: Mapped[str] = mapped_column(
         "conversation_variables", sa.Text, nullable=False, server_default="{}"
     )
+    _rag_pipeline_variables: Mapped[str] = mapped_column(
+        "rag_pipeline_variables", db.Text, nullable=False, server_default="{}"
+    )

     VERSION_DRAFT = "draft"

@@ -159,6 +165,7 @@ class Workflow(Base):
         created_by: str,
         environment_variables: Sequence[Variable],
         conversation_variables: Sequence[Variable],
+        rag_pipeline_variables: list[dict],
         marked_name: str = "",
         marked_comment: str = "",
     ) -> "Workflow":
@@ -173,6 +180,7 @@ class Workflow(Base):
         workflow.created_by = created_by
         workflow.environment_variables = environment_variables or []
         workflow.conversation_variables = conversation_variables or []
+        workflow.rag_pipeline_variables = rag_pipeline_variables or []
         workflow.marked_name = marked_name
         workflow.marked_comment = marked_comment
         workflow.created_at = naive_utc_now()
@@ -314,6 +322,12 @@ class Workflow(Base):

         return variables

+    def rag_pipeline_user_input_form(self) -> list:
+        # get user_input_form from start node
+        variables: list[Any] = self.rag_pipeline_variables
+
+        return variables
+
     @property
     def unique_hash(self) -> str:
         """
@@ -354,7 +368,7 @@ class Workflow(Base):
         if not tenant_id:
             return []

-        environment_variables_dict: dict[str, Any] = json.loads(self._environment_variables)
+        environment_variables_dict: dict[str, Any] = json.loads(self._environment_variables or "{}")
         results = [
             variable_factory.build_environment_variable_from_mapping(v) for v in environment_variables_dict.values()
         ]
@@ -424,6 +438,7 @@ class Workflow(Base):
             "features": self.features_dict,
             "environment_variables": [var.model_dump(mode="json") for var in environment_variables],
             "conversation_variables": [var.model_dump(mode="json") for var in self.conversation_variables],
+            "rag_pipeline_variables": self.rag_pipeline_variables,
         }
         return result

@@ -442,6 +457,23 @@ class Workflow(Base):
             ensure_ascii=False,
         )

+    @property
+    def rag_pipeline_variables(self) -> list[dict]:
+        # TODO: find some way to init `self._rag_pipeline_variables` when instance created.
+        if self._rag_pipeline_variables is None:
+            self._rag_pipeline_variables = "{}"
+
+        variables_dict: dict[str, Any] = json.loads(self._rag_pipeline_variables)
+        results = list(variables_dict.values())
+        return results
+
+    @rag_pipeline_variables.setter
+    def rag_pipeline_variables(self, values: list[dict]) -> None:
+        self._rag_pipeline_variables = json.dumps(
+            {item["variable"]: item for item in values},
+            ensure_ascii=False,
+        )
+
     @staticmethod
     def version_from_datetime(d: datetime) -> str:
         return str(d)
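A brief sketch of the round trip implemented by the property pair above (the field names in the example dicts are illustrative): the setter keys each mapping by its "variable" entry for JSON storage, and the getter returns the stored mappings as a list:

    w = Workflow()
    w.rag_pipeline_variables = [
        {"variable": "chunk_size", "type": "number", "default": 512},
        {"variable": "separator", "type": "string", "default": "\n"},
    ]
    # _rag_pipeline_variables now holds {"chunk_size": {...}, "separator": {...}}.
    assert [v["variable"] for v in w.rag_pipeline_variables] == ["chunk_size", "separator"]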
@@ -606,9 +638,10 @@ class WorkflowNodeExecutionTriggeredFrom(StrEnum):

     SINGLE_STEP = "single-step"
     WORKFLOW_RUN = "workflow-run"
+    RAG_PIPELINE_RUN = "rag-pipeline-run"


-class WorkflowNodeExecutionModel(Base):
+class WorkflowNodeExecutionModel(Base):  # This model is expected to have `offload_data` preloaded in most cases.
     """
     Workflow Node Execution

@@ -725,6 +758,32 @@ class WorkflowNodeExecutionModel(Base):
     created_by: Mapped[str] = mapped_column(StringUUID)
     finished_at: Mapped[datetime | None] = mapped_column(DateTime)

+    offload_data: Mapped[list["WorkflowNodeExecutionOffload"]] = orm.relationship(
+        "WorkflowNodeExecutionOffload",
+        primaryjoin="WorkflowNodeExecutionModel.id == foreign(WorkflowNodeExecutionOffload.node_execution_id)",
+        uselist=True,
+        lazy="raise",
+        back_populates="execution",
+    )
+
+    @staticmethod
+    def preload_offload_data(
+        query: Select[tuple["WorkflowNodeExecutionModel"]] | orm.Query["WorkflowNodeExecutionModel"],
+    ):
+        return query.options(orm.selectinload(WorkflowNodeExecutionModel.offload_data))
+
+    @staticmethod
+    def preload_offload_data_and_files(
+        query: Select[tuple["WorkflowNodeExecutionModel"]] | orm.Query["WorkflowNodeExecutionModel"],
+    ):
+        return query.options(
+            orm.selectinload(WorkflowNodeExecutionModel.offload_data).options(
+                # Chain a nested `selectinload` so the related `UploadFile` rows are
+                # fetched eagerly in one extra query instead of lazily per offload row.
+                orm.selectinload(WorkflowNodeExecutionOffload.file),
+            )
+        )
+
     @property
     def created_by_account(self):
         created_by_role = CreatorUserRole(self.created_by_role)
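A minimal usage sketch (illustrative; the session setup and the `tenant_id` filter are assumptions): because `offload_data` is declared with lazy="raise", callers must attach these preload options before touching the relationship:

    from sqlalchemy import select
    from sqlalchemy.orm import Session

    def fetch_executions(session: Session, tenant_id: str):
        stmt = select(WorkflowNodeExecutionModel).where(
            WorkflowNodeExecutionModel.tenant_id == tenant_id  # column assumed from the full model
        )
        # Eager-load offload rows and their backing UploadFile records in bulk.
        stmt = WorkflowNodeExecutionModel.preload_offload_data_and_files(stmt)
        return session.scalars(stmt).all()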
@@ -773,9 +832,132 @@ class WorkflowNodeExecutionModel(Base):
                 provider_type=tool_info["provider_type"],
                 provider_id=tool_info["provider_id"],
             )

+        elif self.node_type == NodeType.DATASOURCE.value and "datasource_info" in self.execution_metadata_dict:
+            datasource_info = self.execution_metadata_dict["datasource_info"]
+            extras["icon"] = datasource_info.get("icon")
         return extras

+    def _get_offload_by_type(self, type_: ExecutionOffLoadType) -> Optional["WorkflowNodeExecutionOffload"]:
+        return next((i for i in self.offload_data if i.type_ == type_), None)
+
+    @property
+    def inputs_truncated(self) -> bool:
+        """Check if inputs were truncated (offloaded to external storage)."""
+        return self._get_offload_by_type(ExecutionOffLoadType.INPUTS) is not None
+
+    @property
+    def outputs_truncated(self) -> bool:
+        """Check if outputs were truncated (offloaded to external storage)."""
+        return self._get_offload_by_type(ExecutionOffLoadType.OUTPUTS) is not None
+
+    @property
+    def process_data_truncated(self) -> bool:
+        """Check if process_data was truncated (offloaded to external storage)."""
+        return self._get_offload_by_type(ExecutionOffLoadType.PROCESS_DATA) is not None
+
+    @staticmethod
+    def _load_full_content(session: orm.Session, file_id: str, storage: Storage):
+        from .model import UploadFile
+
+        stmt = sa.select(UploadFile).where(UploadFile.id == file_id)
+        file = session.scalars(stmt).first()
+        assert file is not None, f"UploadFile with id {file_id} should exist but was not found"
+        content = storage.load(file.key)
+        return json.loads(content)
+
+    def load_full_inputs(self, session: orm.Session, storage: Storage) -> Mapping[str, Any] | None:
+        offload = self._get_offload_by_type(ExecutionOffLoadType.INPUTS)
+        if offload is None:
+            return self.inputs_dict
+
+        return self._load_full_content(session, offload.file_id, storage)
+
+    def load_full_outputs(self, session: orm.Session, storage: Storage) -> Mapping[str, Any] | None:
+        offload: WorkflowNodeExecutionOffload | None = self._get_offload_by_type(ExecutionOffLoadType.OUTPUTS)
+        if offload is None:
+            return self.outputs_dict
+
+        return self._load_full_content(session, offload.file_id, storage)
+
+    def load_full_process_data(self, session: orm.Session, storage: Storage) -> Mapping[str, Any] | None:
+        offload: WorkflowNodeExecutionOffload | None = self._get_offload_by_type(ExecutionOffLoadType.PROCESS_DATA)
+        if offload is None:
+            return self.process_data_dict
+
+        return self._load_full_content(session, offload.file_id, storage)
+
+
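A short sketch of the intended call pattern (the helper name is illustrative): the load_full_* methods fall back to the inline *_dict copies when nothing was offloaded, so callers need not branch on the truncation flags themselves:

    def dump_node_io(session: orm.Session, storage: Storage, execution: WorkflowNodeExecutionModel):
        # Returns full values whether or not they were offloaded to external storage.
        return {
            "inputs": execution.load_full_inputs(session, storage),
            "outputs": execution.load_full_outputs(session, storage),
            "outputs_truncated": execution.outputs_truncated,
        }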
+class WorkflowNodeExecutionOffload(Base):
+    __tablename__ = "workflow_node_execution_offload"
+    __table_args__ = (
+        # PostgreSQL 14 treats NULL values as distinct in unique constraints by default,
+        # allowing multiple records with NULL values for the same column combination.
+        #
+        # This behavior allows us to have multiple records with a NULL node_execution_id,
+        # simplifying the garbage collection process.
+        UniqueConstraint(
+            "node_execution_id",
+            "type",
+            # Note: PostgreSQL 15+ supports explicit `nulls distinct` behavior through
+            # `postgresql_nulls_not_distinct=False`, which would make our intention clearer.
+            # We rely on PostgreSQL's default behavior of treating NULLs as distinct values.
+            # postgresql_nulls_not_distinct=False,
+        ),
+    )
+    _HASH_COL_SIZE = 64

+    id: Mapped[str] = mapped_column(
+        StringUUID,
+        primary_key=True,
+        server_default=sa.text("uuidv7()"),
+    )
+
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime, default=naive_utc_now, server_default=func.current_timestamp()
+    )
+
+    tenant_id: Mapped[str] = mapped_column(StringUUID)
+    app_id: Mapped[str] = mapped_column(StringUUID)
+
+    # `node_execution_id` indicates the `WorkflowNodeExecutionModel` associated with this offload record.
+    # A value of `None` signifies that this offload record is not linked to any execution record
+    # and should be considered for garbage collection.
+    node_execution_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True)
+    type_: Mapped[ExecutionOffLoadType] = mapped_column(EnumText(ExecutionOffLoadType), name="type", nullable=False)
+
+    # Design Decision: Combining inputs and outputs into a single object was considered to reduce I/O
+    # operations. However, due to the current design of `WorkflowNodeExecutionRepository`,
+    # the `save` method is called at two distinct times:
+    #
+    # - When the node starts execution: the `inputs` field exists, but the `outputs` field is absent
+    # - When the node completes execution (either succeeded or failed): the `outputs` field becomes available
+    #
+    # It's difficult to correlate these two successive calls to `save` for combined storage.
+    # Converting the `WorkflowNodeExecutionRepository` to buffer the first `save` call and flush
+    # when execution completes was also considered, but this would make the execution state unobservable
+    # until completion, significantly damaging the observability of workflow execution.
+    #
+    # Given these constraints, `inputs` and `outputs` are stored separately to maintain real-time
+    # observability and system reliability.
+
+    # `file_id` references the offloaded storage object containing the data.
+    file_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
+
+    execution: Mapped[WorkflowNodeExecutionModel] = orm.relationship(
+        foreign_keys=[node_execution_id],
+        lazy="raise",
+        uselist=False,
+        primaryjoin="WorkflowNodeExecutionOffload.node_execution_id == WorkflowNodeExecutionModel.id",
+        back_populates="offload_data",
+    )
+
+    file: Mapped[Optional["UploadFile"]] = orm.relationship(
+        foreign_keys=[file_id],
+        lazy="raise",
+        uselist=False,
+        primaryjoin="WorkflowNodeExecutionOffload.file_id == UploadFile.id",
+    )
+
+
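Given the NULLs-are-distinct constraint above, orphaned offload rows can be found with a simple predicate. A hedged sketch of such a sweep (the function and its scheduling are assumptions, not part of this commit):

    def sweep_orphaned_offloads(session: orm.Session) -> int:
        # Rows detached from any execution (node_execution_id IS NULL) are GC candidates.
        stmt = sa.select(WorkflowNodeExecutionOffload).where(
            WorkflowNodeExecutionOffload.node_execution_id.is_(None)
        )
        orphans = session.scalars(stmt).all()
        for row in orphans:
            session.delete(row)  # the referenced UploadFile and storage object need cleanup too
        return len(orphans)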
 class WorkflowAppLogCreatedFrom(StrEnum):
     """
@@ -939,7 +1121,10 @@ class WorkflowDraftVariable(Base):
     ]

     __tablename__ = "workflow_draft_variables"
-    __table_args__ = (UniqueConstraint(*unique_app_id_node_id_name()),)
+    __table_args__ = (
+        UniqueConstraint(*unique_app_id_node_id_name()),
+        Index("workflow_draft_variable_file_id_idx", "file_id"),
+    )
     # Required for instance variable annotation.
     __allow_unmapped__ = True

@@ -1000,9 +1185,16 @@ class WorkflowDraftVariable(Base):
     selector: Mapped[str] = mapped_column(sa.String(255), nullable=False, name="selector")

     # The data type of this variable's value
+    #
+    # If the variable is offloaded, `value_type` represents the type of the truncated value,
+    # which may differ from the original value's type. Typically, they are the same,
+    # but in cases where the structurally truncated value still exceeds the size limit,
+    # text slicing is applied, and the `value_type` is converted to `STRING`.
     value_type: Mapped[SegmentType] = mapped_column(EnumText(SegmentType, length=20))

     # The variable's value serialized as a JSON string
+    #
+    # If the variable is offloaded, `value` contains a truncated version, not the full original value.
     value: Mapped[str] = mapped_column(sa.Text, nullable=False, name="value")

     # Controls whether the variable should be displayed in the variable inspection panel
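A toy illustration of the two-stage truncation described in the comment (the helper name and the size limit are assumptions; the real truncation logic lives elsewhere in the codebase):

    import json

    SIZE_LIMIT = 10_000  # assumed threshold, for illustration only

    def truncate_preview(items: list, limit: int = SIZE_LIMIT) -> tuple[str, str]:
        # Stage 1: structural truncation keeps a prefix of elements; value_type is preserved.
        preview = json.dumps(items[:10], ensure_ascii=False)
        if len(preview) <= limit:
            return preview, "array"
        # Stage 2: the structurally truncated value is still too large, so slice the
        # text; value_type degrades to STRING.
        return preview[:limit], "string"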
@@ -1022,6 +1214,35 @@ class WorkflowDraftVariable(Base):
         default=None,
     )

+    # Reference to WorkflowDraftVariableFile for offloaded large variables
+    #
+    # Indicates whether the current draft variable is offloaded.
+    # If not offloaded, this field will be None.
+    file_id: Mapped[str | None] = mapped_column(
+        StringUUID,
+        nullable=True,
+        default=None,
+        comment="Reference to WorkflowDraftVariableFile if the variable is offloaded to external storage",
+    )
+
+    is_default_value: Mapped[bool] = mapped_column(
+        sa.Boolean,
+        nullable=False,
+        default=False,
+        comment=(
+            "Indicates whether the current value is the default for a conversation variable. "
+            "Always `FALSE` for other types of variables."
+        ),
+    )
+
+    # Relationship to WorkflowDraftVariableFile
+    variable_file: Mapped[Optional["WorkflowDraftVariableFile"]] = orm.relationship(
+        foreign_keys=[file_id],
+        lazy="raise",
+        uselist=False,
+        primaryjoin="WorkflowDraftVariableFile.id == WorkflowDraftVariable.file_id",
+    )
+
     # Cache for deserialized value
     #
     # NOTE(QuantumGhost): This field serves two purposes:
@@ -1171,6 +1392,9 @@ class WorkflowDraftVariable(Base):
             case _:
                 return DraftVariableType.NODE

+    def is_truncated(self) -> bool:
+        return self.file_id is not None
+
     @classmethod
     def _new(
         cls,
@@ -1181,6 +1405,7 @@ class WorkflowDraftVariable(Base):
         value: Segment,
         node_execution_id: str | None,
         description: str = "",
+        file_id: str | None = None,
     ) -> "WorkflowDraftVariable":
         variable = WorkflowDraftVariable()
         variable.created_at = _naive_utc_datetime()
@@ -1190,6 +1415,7 @@ class WorkflowDraftVariable(Base):
         variable.node_id = node_id
         variable.name = name
         variable.set_value(value)
+        variable.file_id = file_id
         variable._set_selector(list(variable_utils.to_selector(node_id, name)))
         variable.node_execution_id = node_execution_id
         return variable
@@ -1245,6 +1471,7 @@ class WorkflowDraftVariable(Base):
         node_execution_id: str,
         visible: bool = True,
         editable: bool = True,
+        file_id: str | None = None,
     ) -> "WorkflowDraftVariable":
         variable = cls._new(
             app_id=app_id,
@@ -1252,6 +1479,7 @@ class WorkflowDraftVariable(Base):
             name=name,
             node_execution_id=node_execution_id,
             value=value,
+            file_id=file_id,
         )
         variable.visible = visible
         variable.editable = editable
@@ -1262,5 +1490,92 @@ class WorkflowDraftVariable(Base):
         return self.last_edited_at is not None


+class WorkflowDraftVariableFile(Base):
+    """Stores metadata about files associated with large workflow draft variables.
+
+    This model acts as an intermediary between WorkflowDraftVariable and UploadFile,
+    allowing for proper cleanup of orphaned files when variables are updated or deleted.
+
+    The MIME type of the stored content is recorded in `UploadFile.mime_type`.
+    Possible values are 'application/json' for JSON types other than plain text,
+    and 'text/plain' for JSON strings.
+    """
+
+    __tablename__ = "workflow_draft_variable_files"
+
+    # Primary key
+    id: Mapped[str] = mapped_column(
+        StringUUID,
+        primary_key=True,
+        default=uuidv7,
+        server_default=sa.text("uuidv7()"),
+    )
+
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime,
+        nullable=False,
+        default=_naive_utc_datetime,
+        server_default=func.current_timestamp(),
+    )
+
+    tenant_id: Mapped[str] = mapped_column(
+        StringUUID,
+        nullable=False,
+        comment="The tenant to which the WorkflowDraftVariableFile belongs, referencing Tenant.id",
+    )
+
+    app_id: Mapped[str] = mapped_column(
+        StringUUID,
+        nullable=False,
+        comment="The application to which the WorkflowDraftVariableFile belongs, referencing App.id",
+    )
+
+    user_id: Mapped[str] = mapped_column(
+        StringUUID,
+        nullable=False,
+        comment="The owner of the WorkflowDraftVariableFile, referencing Account.id",
+    )
+
+    # Reference to the `UploadFile.id` field
+    upload_file_id: Mapped[str] = mapped_column(
+        StringUUID,
+        nullable=False,
+        comment="Reference to UploadFile containing the large variable data",
+    )
+
+    # -------------- metadata about the variable content --------------
+
+    # The `size` is already recorded in UploadFile. It is duplicated here to avoid an additional database lookup.
+    size: Mapped[int] = mapped_column(
+        sa.BigInteger,
+        nullable=False,
+        comment="Size of the original variable content in bytes",
+    )
+
+    length: Mapped[int | None] = mapped_column(
+        sa.Integer,
+        nullable=True,
+        comment=(
+            "Length of the original variable content. For array and array-like types, "
+            "this represents the number of elements. For object types, it indicates the number of keys. "
+            "For other types, the value is NULL."
+        ),
+    )
+
+    # The `value_type` field records the type of the original value.
+    value_type: Mapped[SegmentType] = mapped_column(
+        EnumText(SegmentType, length=20),
+        nullable=False,
+    )
+
+    # Relationship to UploadFile
+    upload_file: Mapped["UploadFile"] = orm.relationship(
+        foreign_keys=[upload_file_id],
+        lazy="raise",
+        uselist=False,
+        primaryjoin="WorkflowDraftVariableFile.upload_file_id == UploadFile.id",
+    )
+
+
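A minimal sketch of how a caller might resolve the full value of an offloaded draft variable through this intermediary (the helper name is an assumption; only the mapped attributes come from the models above):

    def load_full_draft_value(session: orm.Session, storage: Storage, draft: WorkflowDraftVariable):
        if not draft.is_truncated():
            # Non-offloaded variables keep their full value inline in `value`.
            return json.loads(draft.value)
        # Offloaded: follow file_id -> WorkflowDraftVariableFile -> UploadFile -> storage key.
        stmt = (
            sa.select(WorkflowDraftVariableFile)
            .where(WorkflowDraftVariableFile.id == draft.file_id)
            .options(orm.selectinload(WorkflowDraftVariableFile.upload_file))
        )
        variable_file = session.scalars(stmt).one()
        return json.loads(storage.load(variable_file.upload_file.key))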
 def is_system_variable_editable(name: str) -> bool:
     return name in _EDITABLE_SYSTEM_VARIABLE