Refactor: use logger = logging.getLogger(__name__) in logging (#24515)

Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Authored by Yongtao Huang, 2025-08-26 18:10:31 +08:00; committed by GitHub
parent 8af2ae973f, commit fa753239ad
102 changed files with 565 additions and 401 deletions
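The change is mechanical throughout: each touched module gains a module-level logger = logging.getLogger(__name__), and calls on the root logging module (logging.warning(...), logging.exception(...), ...) become calls on that logger. Below is a minimal sketch of the before/after pattern, not taken from the diff; the module and function names are illustrative only.

    import logging

    # After the refactor: one module-level logger named after the importing module,
    # so records carry the module path and per-module levels/handlers can be configured.
    # (When run directly as a script, __name__ is "__main__".)
    logger = logging.getLogger(__name__)


    def fetch(url: str) -> None:
        try:
            raise TimeoutError("simulated failure")  # stand-in for a real request
        except TimeoutError:
            # Before the refactor this would have been logging.exception(...), which goes
            # through the root logger and attributes the record to "root". The %s arguments
            # keep formatting lazy; exception() logs at ERROR and appends the traceback.
            logger.exception("Request to URL %s failed", url)


    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        fetch("https://example.com")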

View File

@@ -8,6 +8,8 @@ from core.app.entities.task_entities import AppBlockingResponse, AppStreamRespon
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
+ logger = logging.getLogger(__name__)
class AppGenerateResponseConverter(ABC):
_blocking_response_type: type[AppBlockingResponse]
@@ -120,7 +122,7 @@ class AppGenerateResponseConverter(ABC):
if data:
data.setdefault("message", getattr(e, "description", str(e)))
else:
- logging.error(e)
+ logger.error(e)
data = {
"code": "internal_server_error",
"message": "Internal Server Error, please contact support.",

View File

@@ -32,6 +32,8 @@ from extensions.ext_database import db
from models.model import AppMode, Conversation, MessageAnnotation, MessageFile
from services.annotation_service import AppAnnotationService
+ logger = logging.getLogger(__name__)
class MessageCycleManager:
def __init__(
@@ -98,7 +100,7 @@ class MessageCycleManager:
conversation.name = name
except Exception as e:
if dify_config.DEBUG:
logging.exception("generate conversation name failed, conversation_id: %s", conversation_id)
logger.exception("generate conversation name failed, conversation_id: %s", conversation_id)
pass
db.session.merge(conversation)

View File

@@ -10,6 +10,8 @@ from pydantic import BaseModel
from core.helper.position_helper import sort_to_dict_by_position_map
+ logger = logging.getLogger(__name__)
class ExtensionModule(enum.Enum):
MODERATION = "moderation"
@@ -66,7 +68,7 @@ class Extensible:
# Check for extension module file
if (extension_name + ".py") not in file_names:
logging.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path)
logger.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path)
continue
# Check for builtin flag and position
@@ -95,7 +97,7 @@ class Extensible:
break
if not extension_class:
logging.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name)
logger.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name)
continue
# Load schema if not builtin
@@ -103,7 +105,7 @@ class Extensible:
if not builtin:
json_path = os.path.join(subdir_path, "schema.json")
if not os.path.exists(json_path):
logging.warning("Missing schema.json file in %s, Skip.", subdir_path)
logger.warning("Missing schema.json file in %s, Skip.", subdir_path)
continue
with open(json_path, encoding="utf-8") as f:
@@ -122,7 +124,7 @@ class Extensible:
)
except Exception as e:
logging.exception("Error scanning extensions")
logger.exception("Error scanning extensions")
raise
# Sort extensions by position

View File

@@ -4,6 +4,8 @@ import sys
from types import ModuleType
from typing import AnyStr
+ logger = logging.getLogger(__name__)
def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_lazy_loader: bool = False) -> ModuleType:
"""
@@ -30,7 +32,7 @@ def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_laz
spec.loader.exec_module(module)
return module
except Exception as e:
logging.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path))
logger.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path))
raise e

View File

@@ -9,6 +9,8 @@ import httpx
from configs import dify_config
+ logger = logging.getLogger(__name__)
SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES
HTTP_REQUEST_NODE_SSL_VERIFY = True # Default value for HTTP_REQUEST_NODE_SSL_VERIFY is True
@@ -73,12 +75,12 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if response.status_code not in STATUS_FORCELIST:
return response
else:
- logging.warning(
+ logger.warning(
"Received status code %s for URL %s which is in the force list", response.status_code, url
)
except httpx.RequestError as e:
logging.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e)
logger.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e)
if max_retries == 0:
raise

View File

@@ -39,6 +39,8 @@ from models.dataset import Document as DatasetDocument
from models.model import UploadFile
from services.feature_service import FeatureService
+ logger = logging.getLogger(__name__)
class IndexingRunner:
def __init__(self):
@@ -90,9 +92,9 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except ObjectDeletedError:
logging.warning("Document deleted, document id: %s", dataset_document.id)
logger.warning("Document deleted, document id: %s", dataset_document.id)
except Exception as e:
logging.exception("consume document failed")
logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -153,7 +155,7 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -228,7 +230,7 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -321,7 +323,7 @@ class IndexingRunner:
try:
storage.delete(image_file.key)
except Exception:
- logging.exception(
+ logger.exception(
"Delete image_files failed while indexing_estimate, \
image_upload_file_is: %s",
upload_file_id,

View File

@@ -31,6 +31,8 @@ from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution
from core.workflow.graph_engine.entities.event import AgentLogEvent
from models import App, Message, WorkflowNodeExecutionModel, db
+ logger = logging.getLogger(__name__)
class LLMGenerator:
@classmethod
@@ -68,7 +70,7 @@ class LLMGenerator:
result_dict = json.loads(cleaned_answer)
answer = result_dict["Your Output"]
except json.JSONDecodeError as e:
logging.exception("Failed to generate name after answer, use query instead")
logger.exception("Failed to generate name after answer, use query instead")
answer = query
name = answer.strip()
@@ -125,7 +127,7 @@ class LLMGenerator:
except InvokeError:
questions = []
except Exception:
logging.exception("Failed to generate suggested questions after answer")
logger.exception("Failed to generate suggested questions after answer")
questions = []
return questions
@@ -173,7 +175,7 @@ class LLMGenerator:
error = str(e)
error_step = "generate rule config"
except Exception as e:
logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
logger.exception("Failed to generate rule config, model: %s", model_config.get("name"))
rule_config["error"] = str(e)
rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -270,7 +272,7 @@ class LLMGenerator:
error_step = "generate conversation opener"
except Exception as e:
logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
logger.exception("Failed to generate rule config, model: %s", model_config.get("name"))
rule_config["error"] = str(e)
rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -319,7 +321,7 @@ class LLMGenerator:
error = str(e)
return {"code": "", "language": code_language, "error": f"Failed to generate code. Error: {error}"}
except Exception as e:
- logging.exception(
+ logger.exception(
"Failed to invoke LLM model, model: %s, language: %s", model_config.get("name"), code_language
)
return {"code": "", "language": code_language, "error": f"An unexpected error occurred: {str(e)}"}
@@ -392,7 +394,7 @@ class LLMGenerator:
error = str(e)
return {"output": "", "error": f"Failed to generate JSON Schema. Error: {error}"}
except Exception as e:
logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
logger.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
return {"output": "", "error": f"An unexpected error occurred: {str(e)}"}
@staticmethod
@@ -570,5 +572,5 @@ class LLMGenerator:
error = str(e)
return {"error": f"Failed to generate code. Error: {error}"}
except Exception as e:
logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"), exc_info=e)
logger.exception("Failed to invoke LLM model, model: " + json.dumps(model_config.get("name")), exc_info=e)
return {"error": f"An unexpected error occurred: {str(e)}"}

View File

@@ -152,7 +152,7 @@ class MCPClient:
# ExitStack will handle proper cleanup of all managed context managers
self._exit_stack.close()
except Exception as e:
logging.exception("Error during cleanup")
logger.exception("Error during cleanup")
raise ValueError(f"Error during cleanup: {e}")
finally:
self._session = None

View File

@@ -31,6 +31,9 @@ from core.mcp.types import (
SessionMessage,
)
+ logger = logging.getLogger(__name__)
SendRequestT = TypeVar("SendRequestT", ClientRequest, ServerRequest)
SendResultT = TypeVar("SendResultT", ClientResult, ServerResult)
SendNotificationT = TypeVar("SendNotificationT", ClientNotification, ServerNotification)
@@ -366,7 +369,7 @@ class BaseSession(
self._handle_incoming(notification)
except Exception as e:
# For other validation errors, log and continue
logging.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root)
logger.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root)
else: # Response or error
response_queue = self._response_streams.get(message.message.root.id)
if response_queue is not None:
@@ -376,7 +379,7 @@ class BaseSession(
except queue.Empty:
continue
except Exception:
logging.exception("Error in message processing loop")
logger.exception("Error in message processing loop")
raise
def _received_request(self, responder: RequestResponder[ReceiveRequestT, SendResultT]) -> None:

View File

@@ -306,7 +306,7 @@ class AliyunDataTrace(BaseTraceInstance):
node_span = self.build_workflow_task_span(trace_id, workflow_span_id, trace_info, node_execution)
return node_span
except Exception as e:
- logging.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True)
+ logger.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True)
return None
def get_workflow_node_status(self, node_execution: WorkflowNodeExecution) -> Status:

View File

@@ -37,6 +37,8 @@ from models.model import App, AppModelConfig, Conversation, Message, MessageFile
from models.workflow import WorkflowAppLog, WorkflowRun
from tasks.ops_trace_task import process_trace_tasks
+ logger = logging.getLogger(__name__)
class OpsTraceProviderConfigMap(dict[str, dict[str, Any]]):
def __getitem__(self, provider: str) -> dict[str, Any]:
@@ -287,7 +289,7 @@ class OpsTraceManager:
# create new tracing_instance and update the cache if it absent
tracing_instance = trace_instance(config_class(**decrypt_trace_config))
cls.ops_trace_instances_cache[decrypt_trace_config_key] = tracing_instance
logging.info("new tracing_instance for app_id: %s", app_id)
logger.info("new tracing_instance for app_id: %s", app_id)
return tracing_instance
@classmethod
@@ -849,7 +851,7 @@ class TraceQueueManager:
trace_task.app_id = self.app_id
trace_manager_queue.put(trace_task)
except Exception as e:
logging.exception("Error adding trace task, trace_type %s", trace_task.trace_type)
logger.exception("Error adding trace task, trace_type %s", trace_task.trace_type)
finally:
self.start_timer()
@@ -868,7 +870,7 @@ class TraceQueueManager:
if tasks:
self.send_to_celery(tasks)
except Exception as e:
logging.exception("Error processing trace tasks")
logger.exception("Error processing trace tasks")
def start_timer(self):
global trace_manager_timer

View File

@@ -141,11 +141,11 @@ class BasePluginClient:
response.raise_for_status()
except HTTPError as e:
msg = f"Failed to request plugin daemon, status: {e.response.status_code}, url: {path}"
- logging.exception(msg)
+ logger.exception(msg)
raise e
except Exception as e:
msg = f"Failed to request plugin daemon, url: {path}"
- logging.exception(msg)
+ logger.exception(msg)
raise ValueError(msg) from e
try:
@@ -158,7 +158,7 @@ class BasePluginClient:
f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type.__name__)}],"
f" url: {path}"
)
- logging.exception(msg)
+ logger.exception(msg)
raise ValueError(msg)
if rep.code != 0:

View File

@@ -15,6 +15,8 @@ from core.rag.embedding.embedding_base import Embeddings
from core.rag.models.document import Document
from models.dataset import Dataset
+ logger = logging.getLogger(__name__)
class MyScaleConfig(BaseModel):
host: str
@@ -53,7 +55,7 @@ class MyScaleVector(BaseVector):
return self.add_texts(documents=texts, embeddings=embeddings, **kwargs)
def _create_collection(self, dimension: int):
logging.info("create MyScale collection %s with dimension %s", self._collection_name, dimension)
logger.info("create MyScale collection %s with dimension %s", self._collection_name, dimension)
self._client.command(f"CREATE DATABASE IF NOT EXISTS {self._config.database}")
fts_params = f"('{self._config.fts_params}')" if self._config.fts_params else ""
sql = f"""
@@ -151,7 +153,7 @@ class MyScaleVector(BaseVector):
for r in self._client.query(sql).named_results()
]
except Exception as e:
logging.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401
logger.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401
return []
def delete(self) -> None:

View File

@@ -19,6 +19,8 @@ from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models.dataset import Dataset
+ logger = logging.getLogger(__name__)
class PGVectorConfig(BaseModel):
host: str
@@ -155,7 +157,7 @@ class PGVector(BaseVector):
cur.execute(f"DELETE FROM {self.table_name} WHERE id IN %s", (tuple(ids),))
except psycopg2.errors.UndefinedTable:
# table not exists
logging.warning("Table %s not found, skipping delete operation.", self.table_name)
logger.warning("Table %s not found, skipping delete operation.", self.table_name)
return
except Exception as e:
raise e

View File

@@ -17,6 +17,8 @@ from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models import Dataset
+ logger = logging.getLogger(__name__)
class TableStoreConfig(BaseModel):
access_key_id: Optional[str] = None
@@ -145,7 +147,7 @@ class TableStoreVector(BaseVector):
with redis_client.lock(lock_name, timeout=20):
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
if redis_client.get(collection_exist_cache_key):
logging.info("Collection %s already exists.", self._collection_name)
logger.info("Collection %s already exists.", self._collection_name)
return
self._create_table_if_not_exist()
@@ -155,7 +157,7 @@ class TableStoreVector(BaseVector):
def _create_table_if_not_exist(self) -> None:
table_list = self._tablestore_client.list_table()
if self._table_name in table_list:
logging.info("Tablestore system table[%s] already exists", self._table_name)
logger.info("Tablestore system table[%s] already exists", self._table_name)
return None
schema_of_primary_key = [("id", "STRING")]
@@ -163,12 +165,12 @@ class TableStoreVector(BaseVector):
table_options = tablestore.TableOptions()
reserved_throughput = tablestore.ReservedThroughput(tablestore.CapacityUnit(0, 0))
self._tablestore_client.create_table(table_meta, table_options, reserved_throughput)
logging.info("Tablestore create table[%s] successfully.", self._table_name)
logger.info("Tablestore create table[%s] successfully.", self._table_name)
def _create_search_index_if_not_exist(self, dimension: int) -> None:
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
if self._index_name in [t[1] for t in search_index_list]:
logging.info("Tablestore system index[%s] already exists", self._index_name)
logger.info("Tablestore system index[%s] already exists", self._index_name)
return None
field_schemas = [
@@ -206,20 +208,20 @@ class TableStoreVector(BaseVector):
index_meta = tablestore.SearchIndexMeta(field_schemas)
self._tablestore_client.create_search_index(self._table_name, self._index_name, index_meta)
logging.info("Tablestore create system index[%s] successfully.", self._index_name)
logger.info("Tablestore create system index[%s] successfully.", self._index_name)
def _delete_table_if_exist(self):
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
for resp_tuple in search_index_list:
self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1])
logging.info("Tablestore delete index[%s] successfully.", self._index_name)
logger.info("Tablestore delete index[%s] successfully.", self._index_name)
self._tablestore_client.delete_table(self._table_name)
logging.info("Tablestore delete system table[%s] successfully.", self._index_name)
logger.info("Tablestore delete system table[%s] successfully.", self._index_name)
def _delete_search_index(self) -> None:
self._tablestore_client.delete_search_index(self._table_name, self._index_name)
logging.info("Tablestore delete index[%s] successfully.", self._index_name)
logger.info("Tablestore delete index[%s] successfully.", self._index_name)
def _write_row(self, primary_key: str, attributes: dict[str, Any]) -> None:
pk = [("id", primary_key)]

View File

@@ -75,7 +75,7 @@ class CacheEmbedding(Embeddings):
except IntegrityError:
db.session.rollback()
except Exception:
logging.exception("Failed transform embedding")
logger.exception("Failed transform embedding")
cache_embeddings = []
try:
for i, n_embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
@@ -122,7 +122,7 @@ class CacheEmbedding(Embeddings):
raise ValueError("Normalized embedding is nan please try again")
except Exception as ex:
if dify_config.DEBUG:
logging.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text))
logger.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text))
raise ex
try:
@@ -136,7 +136,7 @@ class CacheEmbedding(Embeddings):
redis_client.setex(embedding_cache_key, 600, encoded_str)
except Exception as ex:
if dify_config.DEBUG:
- logging.exception(
+ logger.exception(
"Failed to add embedding to redis for the text '%s...(%s chars)'", text[:10], len(text)
)
raise ex

View File

@@ -23,6 +23,8 @@ from libs import helper
from models.dataset import Dataset
from services.entities.knowledge_entities.knowledge_entities import Rule
+ logger = logging.getLogger(__name__)
class QAIndexProcessor(BaseIndexProcessor):
def extract(self, extract_setting: ExtractSetting, **kwargs) -> list[Document]:
@@ -182,7 +184,7 @@ class QAIndexProcessor(BaseIndexProcessor):
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception("Failed to format qa document")
logger.exception("Failed to format qa document")
all_qa_documents.extend(format_documents)