make logging not use f-str, change others to f-str (#22882)

Asuka Minato
2025-07-25 11:32:48 +09:00
committed by GitHub
parent 570aee5fe6
commit a189d293f8
164 changed files with 557 additions and 563 deletions
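
The pattern applied throughout: logging calls drop f-strings in favor of printf-style arguments, so interpolation is deferred until a handler actually emits the record (and skipped entirely when the level is filtered out), while ordinary string building moves the other way, to f-strings. A minimal sketch of the two conventions, with placeholder values:

import logging

logging.basicConfig(level=logging.INFO)
account_id, provider = "acc-123", "github"  # placeholder values

# Logging: pass the template and arguments separately; the %s
# substitution only runs if the record is actually emitted.
logging.info("Account %s linked %s account.", account_id, provider)

# Everything else: eager string building, where f-strings are idiomatic.
cache_key = f"enable_app_annotation_{account_id}"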

View File

@@ -332,9 +332,9 @@ class AccountService:
db.session.add(account_integrate)
db.session.commit()
logging.info(f"Account {account.id} linked {provider} account {open_id}.")
logging.info("Account %s linked %s account %s.", account.id, provider, open_id)
except Exception as e:
logging.exception(f"Failed to link {provider} account {open_id} to Account {account.id}")
logging.exception("Failed to link %s account %s to Account %s", provider, open_id, account.id)
raise LinkAccountIntegrateError("Failed to link account.") from e
@staticmethod
@@ -906,7 +906,7 @@ class TenantService:
"""Create tenant member"""
if role == TenantAccountRole.OWNER.value:
if TenantService.has_roles(tenant, [TenantAccountRole.OWNER]):
logging.error(f"Tenant {tenant.id} has already an owner.")
logging.error("Tenant %s has already an owner.", tenant.id)
raise Exception("Tenant already has an owner.")
ta = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first()
@@ -1158,7 +1158,7 @@ class RegisterService:
db.session.query(Tenant).delete()
db.session.commit()
logging.exception(f"Setup account failed, email: {email}, name: {name}")
logging.exception("Setup account failed, email: %s, name: %s", email, name)
raise ValueError(f"Setup failed: {e}")
@classmethod
@@ -1282,7 +1282,7 @@ class RegisterService:
def revoke_token(cls, workspace_id: str, email: str, token: str):
if workspace_id and email:
email_hash = sha256(email.encode()).hexdigest()
cache_key = "member_invite_token:{}, {}:{}".format(workspace_id, email_hash, token)
cache_key = f"member_invite_token:{workspace_id}, {email_hash}:{token}"
redis_client.delete(cache_key)
else:
redis_client.delete(cls._get_invitation_token_key(token))
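
A note on the logging.exception calls above: logging.exception is equivalent to logging.error with exc_info=True, so it records the active traceback and belongs inside except blocks; its message arguments follow the same lazy %-style convention. A small self-contained sketch, with placeholder values:

import logging

logging.basicConfig(level=logging.INFO)
provider, open_id, account_id = "github", "oid-1", "acc-123"  # placeholders

try:
    raise RuntimeError("boom")
except Exception:
    # Logs at ERROR level and appends the current traceback.
    logging.exception("Failed to link %s account %s to Account %s", provider, open_id, account_id)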

View File

@@ -74,14 +74,14 @@ class AppAnnotationService:
@classmethod
def enable_app_annotation(cls, args: dict, app_id: str) -> dict:
enable_app_annotation_key = "enable_app_annotation_{}".format(str(app_id))
enable_app_annotation_key = f"enable_app_annotation_{str(app_id)}"
cache_result = redis_client.get(enable_app_annotation_key)
if cache_result is not None:
return {"job_id": cache_result, "job_status": "processing"}
# async job
job_id = str(uuid.uuid4())
enable_app_annotation_job_key = "enable_app_annotation_job_{}".format(str(job_id))
enable_app_annotation_job_key = f"enable_app_annotation_job_{str(job_id)}"
# send batch add segments task
redis_client.setnx(enable_app_annotation_job_key, "waiting")
enable_annotation_reply_task.delay(
@@ -97,14 +97,14 @@ class AppAnnotationService:
@classmethod
def disable_app_annotation(cls, app_id: str) -> dict:
disable_app_annotation_key = "disable_app_annotation_{}".format(str(app_id))
disable_app_annotation_key = f"disable_app_annotation_{str(app_id)}"
cache_result = redis_client.get(disable_app_annotation_key)
if cache_result is not None:
return {"job_id": cache_result, "job_status": "processing"}
# async job
job_id = str(uuid.uuid4())
disable_app_annotation_job_key = "disable_app_annotation_job_{}".format(str(job_id))
disable_app_annotation_job_key = f"disable_app_annotation_job_{str(job_id)}"
# send batch add segments task
redis_client.setnx(disable_app_annotation_job_key, "waiting")
disable_annotation_reply_task.delay(str(job_id), app_id, current_user.current_tenant_id)
@@ -127,8 +127,8 @@ class AppAnnotationService:
.where(MessageAnnotation.app_id == app_id)
.where(
or_(
MessageAnnotation.question.ilike("%{}%".format(keyword)),
MessageAnnotation.content.ilike("%{}%".format(keyword)),
MessageAnnotation.question.ilike(f"%{keyword}%"),
MessageAnnotation.content.ilike(f"%{keyword}%"),
)
)
.order_by(MessageAnnotation.created_at.desc(), MessageAnnotation.id.desc())
@@ -295,7 +295,7 @@ class AppAnnotationService:
raise ValueError("The number of annotations exceeds the limit of your subscription.")
# async job
job_id = str(uuid.uuid4())
indexing_cache_key = "app_annotation_batch_import_{}".format(str(job_id))
indexing_cache_key = f"app_annotation_batch_import_{str(job_id)}"
# send batch add segments task
redis_client.setnx(indexing_cache_key, "waiting")
batch_import_annotations_task.delay(
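
The enable/disable helpers above share a small idempotency pattern: a per-app key marks an in-flight job, and SETNX (set-if-absent) seeds the new job's status before the Celery task is queued. A sketch of that shape, with the Redis client and task passed in as stand-ins:

import uuid

def trigger_annotation_job(redis_client, app_id: str, task) -> dict:
    app_key = f"enable_app_annotation_{app_id}"
    existing = redis_client.get(app_key)
    if existing is not None:
        # A job for this app is already in flight; report it
        # instead of queuing a duplicate.
        return {"job_id": existing, "job_status": "processing"}

    job_id = str(uuid.uuid4())
    # setnx only writes when the key is absent, so concurrent callers
    # cannot both seed the "waiting" status for the same job key.
    redis_client.setnx(f"enable_app_annotation_job_{job_id}", "waiting")
    task.delay(job_id, app_id)  # hand off to the worker queue
    return {"job_id": job_id, "job_status": "waiting"}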

View File

@@ -102,4 +102,4 @@ class APIBasedExtensionService:
if resp.get("result") != "pong":
raise ValueError(resp)
except Exception as e:
raise ValueError("connection error: {}".format(e))
raise ValueError(f"connection error: {e}")

View File

@@ -94,7 +94,7 @@ class AppService:
except (ProviderTokenNotInitError, LLMBadRequestError):
model_instance = None
except Exception as e:
logging.exception(f"Get default model instance failed, tenant_id: {tenant_id}")
logging.exception("Get default model instance failed, tenant_id: %s", tenant_id)
model_instance = None
if model_instance:

View File

@@ -228,7 +228,7 @@ class ClearFreePlanTenantExpiredLogs:
# only process sandbox tenant
cls.process_tenant(flask_app, tenant_id, days, batch)
except Exception:
logger.exception(f"Failed to process tenant {tenant_id}")
logger.exception("Failed to process tenant %s", tenant_id)
finally:
nonlocal handled_tenant_count
handled_tenant_count += 1
@@ -311,7 +311,7 @@ class ClearFreePlanTenantExpiredLogs:
try:
tenants.append(tenant_id)
except Exception:
logger.exception(f"Failed to process tenant {tenant_id}")
logger.exception("Failed to process tenant %s", tenant_id)
continue
futures.append(

View File

@@ -605,8 +605,9 @@ class DatasetService:
except ProviderTokenNotInitError:
# If we can't get the embedding model, preserve existing settings
logging.warning(
f"Failed to initialize embedding model {data['embedding_model_provider']}/{data['embedding_model']}, "
f"preserving existing settings"
"Failed to initialize embedding model %s/%s, preserving existing settings",
data["embedding_model_provider"],
data["embedding_model"],
)
if dataset.embedding_model_provider and dataset.embedding_model:
filtered_data["embedding_model_provider"] = dataset.embedding_model_provider
@@ -649,11 +650,11 @@ class DatasetService:
@staticmethod
def check_dataset_permission(dataset, user):
if dataset.tenant_id != user.current_tenant_id:
logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
raise NoPermissionError("You do not have permission to access this dataset.")
if user.current_role != TenantAccountRole.OWNER:
if dataset.permission == DatasetPermissionEnum.ONLY_ME and dataset.created_by != user.id:
logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
raise NoPermissionError("You do not have permission to access this dataset.")
if dataset.permission == DatasetPermissionEnum.PARTIAL_TEAM:
# For partial team permission, user needs explicit permission or be the creator
@@ -662,7 +663,7 @@ class DatasetService:
db.session.query(DatasetPermission).filter_by(dataset_id=dataset.id, account_id=user.id).first()
)
if not user_permission:
logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
raise NoPermissionError("You do not have permission to access this dataset.")
@staticmethod
@@ -1000,7 +1001,7 @@ class DocumentService:
db.session.add(document)
db.session.commit()
# set document paused flag
indexing_cache_key = "document_{}_is_paused".format(document.id)
indexing_cache_key = f"document_{document.id}_is_paused"
redis_client.setnx(indexing_cache_key, "True")
@staticmethod
@@ -1015,7 +1016,7 @@ class DocumentService:
db.session.add(document)
db.session.commit()
# delete paused flag
indexing_cache_key = "document_{}_is_paused".format(document.id)
indexing_cache_key = f"document_{document.id}_is_paused"
redis_client.delete(indexing_cache_key)
# trigger async task
recover_document_indexing_task.delay(document.dataset_id, document.id)
@@ -1024,7 +1025,7 @@ class DocumentService:
def retry_document(dataset_id: str, documents: list[Document]):
for document in documents:
# add retry flag
retry_indexing_cache_key = "document_{}_is_retried".format(document.id)
retry_indexing_cache_key = f"document_{document.id}_is_retried"
cache_result = redis_client.get(retry_indexing_cache_key)
if cache_result is not None:
raise ValueError("Document is being retried, please try again later")
@@ -1041,7 +1042,7 @@ class DocumentService:
@staticmethod
def sync_website_document(dataset_id: str, document: Document):
# add sync flag
sync_indexing_cache_key = "document_{}_is_sync".format(document.id)
sync_indexing_cache_key = f"document_{document.id}_is_sync"
cache_result = redis_client.get(sync_indexing_cache_key)
if cache_result is not None:
raise ValueError("Document is being synced, please try again later")
@@ -1174,12 +1175,13 @@ class DocumentService:
)
else:
logging.warning(
f"Invalid process rule mode: {process_rule.mode}, can not find dataset process rule"
"Invalid process rule mode: %s, can not find dataset process rule",
process_rule.mode,
)
return
db.session.add(dataset_process_rule)
db.session.commit()
lock_name = "add_document_lock_dataset_id_{}".format(dataset.id)
lock_name = f"add_document_lock_dataset_id_{dataset.id}"
with redis_client.lock(lock_name, timeout=600):
position = DocumentService.get_documents_position(dataset.id)
document_ids = []
@@ -1862,7 +1864,7 @@ class DocumentService:
task_func.delay(*task_args)
except Exception as e:
# Log the error but do not rollback the transaction
logging.exception(f"Error executing async task for document {update_info['document'].id}")
logging.exception("Error executing async task for document %s", update_info["document"].id)
# don't raise the error immediately, but capture it for later
propagation_error = e
try:
@@ -1873,7 +1875,7 @@ class DocumentService:
redis_client.setex(indexing_cache_key, 600, 1)
except Exception as e:
# Log the error but do not rollback the transaction
logging.exception(f"Error setting cache for document {update_info['document'].id}")
logging.exception("Error setting cache for document %s", update_info["document"].id)
# Raise any propagation error after all updates
if propagation_error:
raise propagation_error
@@ -2001,7 +2003,7 @@ class SegmentService:
)
# calc embedding use tokens
tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]
lock_name = "add_segment_lock_document_id_{}".format(document.id)
lock_name = f"add_segment_lock_document_id_{document.id}"
with redis_client.lock(lock_name, timeout=600):
max_position = (
db.session.query(func.max(DocumentSegment.position))
@@ -2048,7 +2050,7 @@ class SegmentService:
@classmethod
def multi_create_segment(cls, segments: list, document: Document, dataset: Dataset):
lock_name = "multi_add_segment_lock_document_id_{}".format(document.id)
lock_name = f"multi_add_segment_lock_document_id_{document.id}"
increment_word_count = 0
with redis_client.lock(lock_name, timeout=600):
embedding_model = None
@@ -2130,7 +2132,7 @@ class SegmentService:
@classmethod
def update_segment(cls, args: SegmentUpdateArgs, segment: DocumentSegment, document: Document, dataset: Dataset):
indexing_cache_key = "segment_{}_indexing".format(segment.id)
indexing_cache_key = f"segment_{segment.id}_indexing"
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
raise ValueError("Segment is indexing, please try again later")
@@ -2300,7 +2302,7 @@ class SegmentService:
@classmethod
def delete_segment(cls, segment: DocumentSegment, document: Document, dataset: Dataset):
indexing_cache_key = "segment_{}_delete_indexing".format(segment.id)
indexing_cache_key = f"segment_{segment.id}_delete_indexing"
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
raise ValueError("Segment is deleting.")
@@ -2352,7 +2354,7 @@ class SegmentService:
return
real_deal_segmment_ids = []
for segment in segments:
indexing_cache_key = "segment_{}_indexing".format(segment.id)
indexing_cache_key = f"segment_{segment.id}_indexing"
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
continue
@@ -2379,7 +2381,7 @@ class SegmentService:
return
real_deal_segmment_ids = []
for segment in segments:
indexing_cache_key = "segment_{}_indexing".format(segment.id)
indexing_cache_key = f"segment_{segment.id}_indexing"
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
continue
@@ -2398,7 +2400,7 @@ class SegmentService:
def create_child_chunk(
cls, content: str, segment: DocumentSegment, document: Document, dataset: Dataset
) -> ChildChunk:
lock_name = "add_child_lock_{}".format(segment.id)
lock_name = f"add_child_lock_{segment.id}"
with redis_client.lock(lock_name, timeout=20):
index_node_id = str(uuid.uuid4())
index_node_hash = helper.generate_text_hash(content)
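
Several of the f-string keys above name distributed locks rather than cache entries. With redis-py, redis_client.lock returns a context-managed lock whose timeout lets it expire if the holder crashes; a minimal sketch, assuming a reachable Redis:

import redis

r = redis.Redis()  # stand-in for the app's redis_client
document_id = "doc-42"  # placeholder

lock_name = f"add_segment_lock_document_id_{document_id}"
# timeout=600 makes the lock auto-expire, so a crashed worker
# cannot block other writers forever.
with r.lock(lock_name, timeout=600):
    ...  # read max(position) and insert the new segment under the lock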

View File

@@ -77,7 +77,7 @@ class HitTestingService:
)
end = time.perf_counter()
logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
logging.debug("Hit testing retrieve in %s seconds", end - start)
dataset_query = DatasetQuery(
dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id
@@ -113,7 +113,7 @@ class HitTestingService:
)
end = time.perf_counter()
logging.debug(f"External knowledge hit testing retrieve in {end - start:0.4f} seconds")
logging.debug("External knowledge hit testing retrieve in %s seconds", end - start)
dataset_query = DatasetQuery(
dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id
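
For reference, printf-style logging accepts the same numeric directives as f-string format specs, so a fixed-point duration like the original ":0.4f" above has a direct lazy equivalent:

import logging
import time

logging.basicConfig(level=logging.DEBUG)

start = time.perf_counter()
time.sleep(0.01)
end = time.perf_counter()

# %.4f is the printf-style counterpart of the f-string spec ":0.4f".
logging.debug("Hit testing retrieve in %.4f seconds", end - start)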

View File

@@ -340,7 +340,7 @@ class ModelLoadBalancingService:
config_id = str(config_id)
if config_id not in current_load_balancing_configs_dict:
raise ValueError("Invalid load balancing config id: {}".format(config_id))
raise ValueError(f"Invalid load balancing config id: {config_id}")
updated_config_ids.add(config_id)
@@ -349,7 +349,7 @@ class ModelLoadBalancingService:
# check duplicate name
for current_load_balancing_config in current_load_balancing_configs:
if current_load_balancing_config.id != config_id and current_load_balancing_config.name == name:
raise ValueError("Load balancing config name {} already exists".format(name))
raise ValueError(f"Load balancing config name {name} already exists")
if credentials:
if not isinstance(credentials, dict):
@@ -383,7 +383,7 @@ class ModelLoadBalancingService:
# check duplicate name
for current_load_balancing_config in current_load_balancing_configs:
if current_load_balancing_config.name == name:
raise ValueError("Load balancing config name {} already exists".format(name))
raise ValueError(f"Load balancing config name {name} already exists")
if not credentials:
raise ValueError("Invalid load balancing config credentials")

View File

@@ -380,7 +380,7 @@ class ModelProviderService:
else None
)
except Exception as e:
logger.debug(f"get_default_model_of_model_type error: {e}")
logger.debug("get_default_model_of_model_type error: %s", e)
return None
def update_default_model_of_model_type(self, tenant_id: str, model_type: str, provider: str, model: str) -> None:

View File

@@ -65,9 +65,7 @@ class OpsService:
}
)
except Exception:
- new_decrypt_tracing_config.update(
- {"project_url": "{host}/".format(host=decrypt_tracing_config.get("host"))}
- )
+ new_decrypt_tracing_config.update({"project_url": f"{decrypt_tracing_config.get('host')}/"})
if tracing_provider == "langsmith" and (
"project_url" not in decrypt_tracing_config or not decrypt_tracing_config.get("project_url")
@@ -139,7 +137,7 @@ class OpsService:
project_url = OpsTraceManager.get_trace_config_project_url(tracing_config, tracing_provider)
elif tracing_provider == "langfuse":
project_key = OpsTraceManager.get_trace_config_project_key(tracing_config, tracing_provider)
project_url = "{host}/project/{key}".format(host=tracing_config.get("host"), key=project_key)
project_url = f"{tracing_config.get('host')}/project/{project_key}"
elif tracing_provider in ("langsmith", "opik"):
project_url = OpsTraceManager.get_trace_config_project_url(tracing_config, tracing_provider)
else:

View File

@@ -110,7 +110,7 @@ limit 1000"""
)
)
logger.exception(
f"[{processed_count}] Failed to migrate [{table_name}] {record_id} ({provider_name})"
"[%s] Failed to migrate [%s] %s (%s)", processed_count, table_name, record_id, provider_name
)
continue
@@ -183,7 +183,7 @@ limit 1000"""
)
)
logger.exception(
f"[{processed_count}] Failed to migrate [{table_name}] {record_id} ({provider_name})"
"[%s] Failed to migrate [%s] %s (%s)", processed_count, table_name, record_id, provider_name
)
continue

View File

@@ -78,7 +78,7 @@ class PluginMigration:
)
)
except Exception:
logger.exception(f"Failed to process tenant {tenant_id}")
logger.exception("Failed to process tenant %s", tenant_id)
futures = []
@@ -136,7 +136,7 @@ class PluginMigration:
try:
tenants.append(tenant_id)
except Exception:
logger.exception(f"Failed to process tenant {tenant_id}")
logger.exception("Failed to process tenant %s", tenant_id)
continue
futures.append(
@@ -273,7 +273,7 @@ class PluginMigration:
result.append(ToolProviderID(tool_entity.provider_id).plugin_id)
except Exception:
logger.exception(f"Failed to process tool {tool}")
logger.exception("Failed to process tool %s", tool)
continue
return result
@@ -301,7 +301,7 @@ class PluginMigration:
plugins: dict[str, str] = {}
plugin_ids = []
plugin_not_exist = []
logger.info(f"Extracting unique plugins from {extracted_plugins}")
logger.info("Extracting unique plugins from %s", extracted_plugins)
with open(extracted_plugins) as f:
for line in f:
data = json.loads(line)
@@ -318,7 +318,7 @@ class PluginMigration:
else:
plugin_not_exist.append(plugin_id)
except Exception:
logger.exception(f"Failed to fetch plugin unique identifier for {plugin_id}")
logger.exception("Failed to fetch plugin unique identifier for %s", plugin_id)
plugin_not_exist.append(plugin_id)
with ThreadPoolExecutor(max_workers=10) as executor:
@@ -339,7 +339,7 @@ class PluginMigration:
# use a fake tenant id to install all the plugins
fake_tenant_id = uuid4().hex
logger.info(f"Installing {len(plugins['plugins'])} plugin instances for fake tenant {fake_tenant_id}")
logger.info("Installing %s plugin instances for fake tenant %s", len(plugins["plugins"]), fake_tenant_id)
thread_pool = ThreadPoolExecutor(max_workers=workers)
@@ -348,7 +348,7 @@ class PluginMigration:
plugin_install_failed.extend(response.get("failed", []))
def install(tenant_id: str, plugin_ids: list[str]) -> None:
logger.info(f"Installing {len(plugin_ids)} plugins for tenant {tenant_id}")
logger.info("Installing %s plugins for tenant %s", len(plugin_ids), tenant_id)
# fetch plugin already installed
installed_plugins = manager.list_plugins(tenant_id)
installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins]
@@ -408,7 +408,7 @@ class PluginMigration:
installation = manager.list_plugins(fake_tenant_id)
except Exception:
logger.exception(f"Failed to get installation for tenant {fake_tenant_id}")
logger.exception("Failed to get installation for tenant %s", fake_tenant_id)
Path(output_file).write_text(
json.dumps(
@@ -491,7 +491,9 @@ class PluginMigration:
else:
failed.append(reverse_map[plugin.plugin_unique_identifier])
logger.error(
f"Failed to install plugin {plugin.plugin_unique_identifier}, error: {plugin.message}"
"Failed to install plugin %s, error: %s",
plugin.plugin_unique_identifier,
plugin.message,
)
done = True
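
The migration fans the per-tenant installs out over a thread pool, as the ThreadPoolExecutor lines above suggest. A runnable sketch of that shape, with a stand-in install function and placeholder data:

from concurrent.futures import ThreadPoolExecutor

def install(tenant_id: str, plugin_ids: list[str]) -> None:
    # Stand-in: the real code asks the plugin manager to install here.
    print(f"installing {len(plugin_ids)} plugins for tenant {tenant_id}")

tenants = {"t1": ["p1", "p2"], "t2": ["p3"]}  # placeholder data

with ThreadPoolExecutor(max_workers=10) as executor:
    futures = [executor.submit(install, t, p) for t, p in tenants.items()]
    for future in futures:
        future.result()  # surface any exception raised in a worker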

View File

@@ -20,7 +20,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase):
try:
result = self.fetch_recommended_app_detail_from_dify_official(app_id)
except Exception as e:
logger.warning(f"fetch recommended app detail from dify official failed: {e}, switch to built-in.")
logger.warning("fetch recommended app detail from dify official failed: %s, switch to built-in.", e)
result = BuildInRecommendAppRetrieval.fetch_recommended_app_detail_from_builtin(app_id)
return result
@@ -28,7 +28,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase):
try:
result = self.fetch_recommended_apps_from_dify_official(language)
except Exception as e:
logger.warning(f"fetch recommended apps from dify official failed: {e}, switch to built-in.")
logger.warning("fetch recommended apps from dify official failed: %s, switch to built-in.", e)
result = BuildInRecommendAppRetrieval.fetch_recommended_apps_from_builtin(language)
return result

View File

@@ -337,7 +337,7 @@ class BuiltinToolManageService:
max_number = max(numbers)
return f"{default_pattern} {max_number + 1}"
except Exception as e:
logger.warning(f"Error generating next provider name for {provider}: {str(e)}")
logger.warning("Error generating next provider name for %s: %s", provider, str(e))
# fallback
return f"{credential_type.get_name()} 1"

View File

@@ -275,7 +275,7 @@ class ToolTransformService:
username = user.name
except Exception:
logger.exception(f"failed to get user name for api provider {db_provider.id}")
logger.exception("failed to get user name for api provider %s", db_provider.id)
# add provider into providers
credentials = db_provider.credentials
result = ToolProviderApiEntity(