chore: clean up ruff flake8-simplify linter rules (#8286)

Co-authored-by: -LAN- <laipz8200@outlook.com>
This commit is contained in:
Bowen Liang
2024-09-12 12:55:45 +08:00
committed by GitHub
parent 0bb7569d46
commit 0f14873255
34 changed files with 108 additions and 136 deletions

View File

@@ -127,27 +127,26 @@ class RelytVector(BaseVector):
)
chunks_table_data = []
with self.client.connect() as conn:
with conn.begin():
for document, metadata, chunk_id, embedding in zip(texts, metadatas, ids, embeddings):
chunks_table_data.append(
{
"id": chunk_id,
"embedding": embedding,
"document": document,
"metadata": metadata,
}
)
with self.client.connect() as conn, conn.begin():
for document, metadata, chunk_id, embedding in zip(texts, metadatas, ids, embeddings):
chunks_table_data.append(
{
"id": chunk_id,
"embedding": embedding,
"document": document,
"metadata": metadata,
}
)
# Execute the batch insert when the batch size is reached
if len(chunks_table_data) == 500:
conn.execute(insert(chunks_table).values(chunks_table_data))
# Clear the chunks_table_data list for the next batch
chunks_table_data.clear()
# Insert any remaining records that didn't make up a full batch
if chunks_table_data:
# Execute the batch insert when the batch size is reached
if len(chunks_table_data) == 500:
conn.execute(insert(chunks_table).values(chunks_table_data))
# Clear the chunks_table_data list for the next batch
chunks_table_data.clear()
# Insert any remaining records that didn't make up a full batch
if chunks_table_data:
conn.execute(insert(chunks_table).values(chunks_table_data))
return ids
@@ -186,11 +185,10 @@ class RelytVector(BaseVector):
)
try:
with self.client.connect() as conn:
with conn.begin():
delete_condition = chunks_table.c.id.in_(ids)
conn.execute(chunks_table.delete().where(delete_condition))
return True
with self.client.connect() as conn, conn.begin():
delete_condition = chunks_table.c.id.in_(ids)
conn.execute(chunks_table.delete().where(delete_condition))
return True
except Exception as e:
print("Delete operation failed:", str(e))
return False

View File

@@ -63,10 +63,7 @@ class TencentVector(BaseVector):
def _has_collection(self) -> bool:
collections = self._db.list_collections()
for collection in collections:
if collection.collection_name == self._collection_name:
return True
return False
return any(collection.collection_name == self._collection_name for collection in collections)
def _create_collection(self, dimension: int) -> None:
lock_name = "vector_indexing_lock_{}".format(self._collection_name)

View File

@@ -124,20 +124,19 @@ class TiDBVector(BaseVector):
texts = [d.page_content for d in documents]
chunks_table_data = []
with self._engine.connect() as conn:
with conn.begin():
for id, text, meta, embedding in zip(ids, texts, metas, embeddings):
chunks_table_data.append({"id": id, "vector": embedding, "text": text, "meta": meta})
with self._engine.connect() as conn, conn.begin():
for id, text, meta, embedding in zip(ids, texts, metas, embeddings):
chunks_table_data.append({"id": id, "vector": embedding, "text": text, "meta": meta})
# Execute the batch insert when the batch size is reached
if len(chunks_table_data) == 500:
conn.execute(insert(table).values(chunks_table_data))
# Clear the chunks_table_data list for the next batch
chunks_table_data.clear()
# Insert any remaining records that didn't make up a full batch
if chunks_table_data:
# Execute the batch insert when the batch size is reached
if len(chunks_table_data) == 500:
conn.execute(insert(table).values(chunks_table_data))
# Clear the chunks_table_data list for the next batch
chunks_table_data.clear()
# Insert any remaining records that didn't make up a full batch
if chunks_table_data:
conn.execute(insert(table).values(chunks_table_data))
return ids
def text_exists(self, id: str) -> bool:
@@ -160,11 +159,10 @@ class TiDBVector(BaseVector):
raise ValueError("No ids provided to delete.")
table = self._table(self._dimension)
try:
with self._engine.connect() as conn:
with conn.begin():
delete_condition = table.c.id.in_(ids)
conn.execute(table.delete().where(delete_condition))
return True
with self._engine.connect() as conn, conn.begin():
delete_condition = table.c.id.in_(ids)
conn.execute(table.delete().where(delete_condition))
return True
except Exception as e:
print("Delete operation failed:", str(e))
return False