Feat/dify rag (#2528)

Co-authored-by: jyong <jyong@dify.ai>
This commit is contained in:
Jyong
2024-02-22 23:31:57 +08:00
committed by GitHub
parent 97fe817186
commit 6c4e6bf1d6
119 changed files with 3181 additions and 5892 deletions

View File

@@ -0,0 +1,232 @@
import json
from collections import defaultdict
from typing import Any, Optional
from pydantic import BaseModel
from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler
from core.rag.datasource.keyword.keyword_base import BaseKeyword
from core.rag.models.document import Document
from extensions.ext_database import db
from models.dataset import Dataset, DatasetKeywordTable, DocumentSegment
class KeywordTableConfig(BaseModel):
    """Tunable settings for building the keyword table."""

    # Maximum number of TF-IDF keywords extracted per text chunk.
    max_keywords_per_chunk: int = 10
class Jieba(BaseKeyword):
    """Keyword-table index built with jieba TF-IDF keyword extraction.

    Maintains a mapping of keyword -> set of segment (node) ids and persists
    it as JSON on the dataset's DatasetKeywordTable row.
    """

    def __init__(self, dataset: Dataset):
        super().__init__(dataset)
        self._config = KeywordTableConfig()

    def create(self, texts: list[Document], **kwargs) -> BaseKeyword:
        """Index every document: extract keywords, store them on the
        segment row, and merge them into the dataset keyword table."""
        keyword_table_handler = JiebaKeywordTableHandler()
        keyword_table = self._get_dataset_keyword_table()
        for text in texts:
            keywords = keyword_table_handler.extract_keywords(
                text.page_content, self._config.max_keywords_per_chunk)
            self._update_segment_keywords(self.dataset.id, text.metadata['doc_id'], list(keywords))
            keyword_table = self._add_text_to_keyword_table(
                keyword_table, text.metadata['doc_id'], list(keywords))

        self._save_dataset_keyword_table(keyword_table)
        return self

    def add_texts(self, texts: list[Document], **kwargs):
        """Add documents to the existing keyword table.

        If kwargs['keywords_list'] is provided it must be parallel to
        `texts` and supplies pre-computed keywords; otherwise keywords
        are extracted from each document's content.
        """
        keyword_table_handler = JiebaKeywordTableHandler()

        keyword_table = self._get_dataset_keyword_table()
        keywords_list = kwargs.get('keywords_list', None)
        for i, text in enumerate(texts):
            if keywords_list:
                keywords = keywords_list[i]
            else:
                keywords = keyword_table_handler.extract_keywords(
                    text.page_content, self._config.max_keywords_per_chunk)
            self._update_segment_keywords(self.dataset.id, text.metadata['doc_id'], list(keywords))
            keyword_table = self._add_text_to_keyword_table(
                keyword_table, text.metadata['doc_id'], list(keywords))

        self._save_dataset_keyword_table(keyword_table)

    def text_exists(self, id: str) -> bool:
        """Return True if any keyword maps to the given node id."""
        keyword_table = self._get_dataset_keyword_table()
        if not keyword_table:
            # set.union(*[]) is a zero-argument unbound call and raises
            # TypeError, so an empty table must be handled explicitly.
            return False
        return id in set.union(*keyword_table.values())

    def delete_by_ids(self, ids: list[str]) -> None:
        """Remove the given node ids from the keyword table."""
        keyword_table = self._get_dataset_keyword_table()
        keyword_table = self._delete_ids_from_keyword_table(keyword_table, ids)

        self._save_dataset_keyword_table(keyword_table)

    def delete_by_document_id(self, document_id: str):
        """Remove every segment of the given document from the keyword table."""
        # get segment ids by document_id
        segments = db.session.query(DocumentSegment).filter(
            DocumentSegment.dataset_id == self.dataset.id,
            DocumentSegment.document_id == document_id
        ).all()

        ids = [segment.index_node_id for segment in segments]

        keyword_table = self._get_dataset_keyword_table()
        keyword_table = self._delete_ids_from_keyword_table(keyword_table, ids)

        self._save_dataset_keyword_table(keyword_table)

    def search(
            self, query: str,
            **kwargs: Any
    ) -> list[Document]:
        """Return up to kwargs['top_k'] (default 4) documents whose segments
        match the most keywords extracted from `query`."""
        keyword_table = self._get_dataset_keyword_table()

        k = kwargs.get('top_k', 4)

        sorted_chunk_indices = self._retrieve_ids_by_query(keyword_table, query, k)

        documents = []
        for chunk_index in sorted_chunk_indices:
            segment = db.session.query(DocumentSegment).filter(
                DocumentSegment.dataset_id == self.dataset.id,
                DocumentSegment.index_node_id == chunk_index
            ).first()

            if segment:
                documents.append(Document(
                    page_content=segment.content,
                    metadata={
                        "doc_id": chunk_index,
                        "doc_hash": segment.index_node_hash,
                        "document_id": segment.document_id,
                        "dataset_id": segment.dataset_id,
                    }
                ))

        return documents

    def delete(self) -> None:
        """Drop the persisted keyword table row for this dataset, if any."""
        dataset_keyword_table = self.dataset.dataset_keyword_table
        if dataset_keyword_table:
            db.session.delete(dataset_keyword_table)
            db.session.commit()

    def _save_dataset_keyword_table(self, keyword_table):
        """Serialize and persist the keyword table (sets become JSON lists)."""
        keyword_table_dict = {
            '__type__': 'keyword_table',
            '__data__': {
                "index_id": self.dataset.id,
                "summary": None,
                "table": keyword_table
            }
        }
        # NOTE(review): assumes dataset.dataset_keyword_table exists; it is
        # created by _get_dataset_keyword_table() — confirm the relationship
        # is refreshed before this is called on a brand-new dataset.
        self.dataset.dataset_keyword_table.keyword_table = json.dumps(keyword_table_dict, cls=SetEncoder)
        db.session.commit()

    def _get_dataset_keyword_table(self) -> Optional[dict]:
        """Load the keyword table, creating an empty persisted row if absent."""
        dataset_keyword_table = self.dataset.dataset_keyword_table
        if dataset_keyword_table:
            if dataset_keyword_table.keyword_table_dict:
                return dataset_keyword_table.keyword_table_dict['__data__']['table']
        else:
            dataset_keyword_table = DatasetKeywordTable(
                dataset_id=self.dataset.id,
                keyword_table=json.dumps({
                    '__type__': 'keyword_table',
                    '__data__': {
                        "index_id": self.dataset.id,
                        "summary": None,
                        "table": {}
                    }
                }, cls=SetEncoder)
            )
            db.session.add(dataset_keyword_table)
            db.session.commit()

        return {}

    def _add_text_to_keyword_table(self, keyword_table: dict, id: str, keywords: list[str]) -> dict:
        """Map every keyword to the node id; returns the mutated table."""
        for keyword in keywords:
            if keyword not in keyword_table:
                keyword_table[keyword] = set()
            keyword_table[keyword].add(id)
        return keyword_table

    def _delete_ids_from_keyword_table(self, keyword_table: dict, ids: list[str]) -> dict:
        """Remove node ids from the table, dropping keywords left empty."""
        # get set of ids that correspond to node
        node_idxs_to_delete = set(ids)

        # delete node_idxs from keyword to node idxs mapping
        keywords_to_delete = set()
        for keyword, node_idxs in keyword_table.items():
            if node_idxs_to_delete.intersection(node_idxs):
                keyword_table[keyword] = node_idxs.difference(
                    node_idxs_to_delete
                )
                if not keyword_table[keyword]:
                    keywords_to_delete.add(keyword)

        for keyword in keywords_to_delete:
            del keyword_table[keyword]

        return keyword_table

    def _retrieve_ids_by_query(self, keyword_table: dict, query: str, k: int = 4):
        """Return up to k node ids ranked by how many query keywords they match."""
        keyword_table_handler = JiebaKeywordTableHandler()
        keywords = keyword_table_handler.extract_keywords(query)

        # go through text chunks in order of most matching keywords
        chunk_indices_count: dict[str, int] = defaultdict(int)
        # Test membership against the dict directly — the original rebuilt
        # set(keyword_table.keys()) on every iteration of the comprehension.
        keywords = [keyword for keyword in keywords if keyword in keyword_table]
        for keyword in keywords:
            for node_id in keyword_table[keyword]:
                chunk_indices_count[node_id] += 1

        sorted_chunk_indices = sorted(
            list(chunk_indices_count.keys()),
            key=lambda x: chunk_indices_count[x],
            reverse=True,
        )

        return sorted_chunk_indices[: k]

    def _update_segment_keywords(self, dataset_id: str, node_id: str, keywords: list[str]):
        """Persist the extracted keywords onto the matching segment row."""
        document_segment = db.session.query(DocumentSegment).filter(
            DocumentSegment.dataset_id == dataset_id,
            DocumentSegment.index_node_id == node_id
        ).first()
        if document_segment:
            document_segment.keywords = keywords
            db.session.add(document_segment)
            db.session.commit()

    def create_segment_keywords(self, node_id: str, keywords: list[str]):
        """Attach pre-computed keywords to one segment and index them."""
        keyword_table = self._get_dataset_keyword_table()
        self._update_segment_keywords(self.dataset.id, node_id, keywords)
        keyword_table = self._add_text_to_keyword_table(keyword_table, node_id, keywords)
        self._save_dataset_keyword_table(keyword_table)

    def multi_create_segment_keywords(self, pre_segment_data_list: list):
        """Index many segments at once; extract keywords where none are given.

        Each item is a dict with keys 'segment' and 'keywords'.
        """
        keyword_table_handler = JiebaKeywordTableHandler()
        keyword_table = self._get_dataset_keyword_table()
        for pre_segment_data in pre_segment_data_list:
            segment = pre_segment_data['segment']
            if pre_segment_data['keywords']:
                segment.keywords = pre_segment_data['keywords']
                keyword_table = self._add_text_to_keyword_table(
                    keyword_table, segment.index_node_id, pre_segment_data['keywords'])
            else:
                keywords = keyword_table_handler.extract_keywords(
                    segment.content, self._config.max_keywords_per_chunk)
                segment.keywords = list(keywords)
                keyword_table = self._add_text_to_keyword_table(
                    keyword_table, segment.index_node_id, list(keywords))
        self._save_dataset_keyword_table(keyword_table)

    def update_segment_keywords_index(self, node_id: str, keywords: list[str]):
        """Merge the given keywords for one node into the table and persist."""
        keyword_table = self._get_dataset_keyword_table()
        keyword_table = self._add_text_to_keyword_table(keyword_table, node_id, keywords)
        self._save_dataset_keyword_table(keyword_table)
class SetEncoder(json.JSONEncoder):
    """JSON encoder that renders ``set`` objects as JSON arrays."""

    def default(self, obj):
        # Sets are not natively JSON-serializable; emit them as lists and
        # defer everything else to the base encoder (which raises TypeError).
        if not isinstance(obj, set):
            return super().default(obj)
        return list(obj)

View File

@@ -0,0 +1,32 @@
import re
import jieba
from jieba.analyse import default_tfidf
from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS
class JiebaKeywordTableHandler:
    """Extracts keywords from text using jieba's TF-IDF ranking."""

    def __init__(self):
        # Use the shared stopword list when ranking terms by TF-IDF.
        default_tfidf.stop_words = STOPWORDS

    def extract_keywords(self, text: str, max_keywords_per_chunk: int = 10) -> set[str]:
        """Extract up to `max_keywords_per_chunk` keywords with JIEBA tfidf,
        expanded with their sub-tokens."""
        keywords = jieba.analyse.extract_tags(
            sentence=text,
            topK=max_keywords_per_chunk,
        )

        return set(self._expand_tokens_with_subtokens(keywords))

    def _expand_tokens_with_subtokens(self, tokens: list[str]) -> set[str]:
        """Get subtokens from a list of tokens, filtering for stopwords.

        Fix: annotated as list[str] — extract_tags returns a list, not a set.
        """
        results = set()
        for token in tokens:
            results.add(token)
            sub_tokens = re.findall(r"\w+", token)
            if len(sub_tokens) > 1:
                # Test membership against the set directly; the original
                # built list(STOPWORDS) for every sub-token (O(n) per test).
                results.update({w for w in sub_tokens if w not in STOPWORDS})

        return results

View File

@@ -0,0 +1,90 @@
# Combined English + Chinese stopword set used to filter jieba keyword
# extraction (assigned to default_tfidf.stop_words and consulted when
# expanding sub-tokens).
# NOTE(review): many entries below are empty strings "" — duplicates collapse
# in a set literal, so these look like CJK characters lost to an
# encoding/extraction issue. Verify against the upstream stopword list.
STOPWORDS = {
"during", "when", "but", "then", "further", "isn", "mustn't", "until", "own", "i", "couldn", "y", "only", "you've",
"ours", "who", "where", "ourselves", "has", "to", "was", "didn't", "themselves", "if", "against", "through", "her",
"an", "your", "can", "those", "didn", "about", "aren't", "shan't", "be", "not", "these", "again", "so", "t",
"theirs", "weren", "won't", "won", "itself", "just", "same", "while", "why", "doesn", "aren", "him", "haven",
"for", "you'll", "that", "we", "am", "d", "by", "having", "wasn't", "than", "weren't", "out", "from", "now",
"their", "too", "hadn", "o", "needn", "most", "it", "under", "needn't", "any", "some", "few", "ll", "hers", "which",
"m", "you're", "off", "other", "had", "she", "you'd", "do", "you", "does", "s", "will", "each", "wouldn't", "hasn't",
"such", "more", "whom", "she's", "my", "yours", "yourself", "of", "on", "very", "hadn't", "with", "yourselves",
"been", "ma", "them", "mightn't", "shan", "mustn", "they", "what", "both", "that'll", "how", "is", "he", "because",
"down", "haven't", "are", "no", "it's", "our", "being", "the", "or", "above", "myself", "once", "don't", "doesn't",
"as", "nor", "here", "herself", "hasn", "mightn", "have", "its", "all", "were", "ain", "this", "at", "after",
"over", "shouldn't", "into", "before", "don", "wouldn", "re", "couldn't", "wasn", "in", "should", "there",
"himself", "isn't", "should've", "doing", "ve", "shouldn", "a", "did", "and", "his", "between", "me", "up", "below",
"人民", "末##末", "", "", "", "哎呀", "哎哟", "", "", "俺们", "", "按照", "", "吧哒", "", "罢了", "", "",
"本着", "", "比方", "比如", "鄙人", "", "彼此", "", "", "别的", "别说", "", "并且", "不比", "不成", "不单", "不但",
"不独", "不管", "不光", "不过", "不仅", "不拘", "不论", "不怕", "不然", "不如", "不特", "不惟", "不问", "不只", "", "朝着",
"", "趁着", "", "", "", "除此之外", "除非", "除了", "", "此间", "此外", "", "从而", "", "", "", "但是", "",
"当着", "", "", "", "的话", "", "等等", "", "", "叮咚", "", "对于", "", "多少", "", "而况", "而且", "而是",
"而外", "而言", "而已", "尔后", "反过来", "反过来说", "反之", "非但", "非徒", "否则", "", "嘎登", "", "", "", "",
"各个", "各位", "各种", "各自", "", "根据", "", "", "故此", "固然", "关于", "", "", "果然", "果真", "", "",
"哈哈", "", "", "", "何处", "何况", "何时", "", "", "哼唷", "呼哧", "", "", "还是", "还有", "换句话说", "换言之",
"", "或是", "或者", "极了", "", "及其", "及至", "", "即便", "即或", "即令", "即若", "即使", "", "几时", "", "",
"既然", "既是", "继而", "加之", "假如", "假若", "假使", "鉴于", "", "", "较之", "", "接着", "结果", "", "紧接着",
"进而", "", "尽管", "", "经过", "", "就是", "就是说", "", "具体地说", "具体说来", "开始", "开外", "", "", "",
"可见", "可是", "可以", "况且", "", "", "来着", "", "例如", "", "", "连同", "两者", "", "", "", "另外",
"另一方面", "", "", "", "慢说", "漫说", "", "", "", "每当", "", "莫若", "", "某个", "某些", "", "",
"哪边", "哪儿", "哪个", "哪里", "哪年", "哪怕", "哪天", "哪些", "哪样", "", "那边", "那儿", "那个", "那会儿", "那里", "那么",
"那么些", "那么样", "那时", "那些", "那样", "", "乃至", "", "", "", "你们", "", "", "宁可", "宁肯", "宁愿", "",
"", "啪达", "旁人", "", "", "凭借", "", "其次", "其二", "其他", "其它", "其一", "其余", "其中", "", "起见", "岂但",
"恰恰相反", "前后", "前者", "", "然而", "然后", "然则", "", "人家", "", "任何", "任凭", "", "如此", "如果", "如何",
"如其", "如若", "如上所述", "", "若非", "若是", "", "上下", "尚且", "设若", "设使", "甚而", "甚么", "甚至", "省得", "时候",
"什么", "什么样", "使得", "", "是的", "首先", "", "谁知", "", "顺着", "似的", "", "虽然", "虽说", "虽则", "", "随着",
"", "所以", "", "他们", "他人", "", "它们", "", "她们", "", "倘或", "倘然", "倘若", "倘使", "", "", "通过", "",
"同时", "", "万一", "", "", "", "为何", "为了", "为什么", "为着", "", "嗡嗡", "", "我们", "", "呜呼", "乌乎",
"无论", "无宁", "毋宁", "", "", "相对而言", "", "", "向着", "", "", "", "沿", "沿着", "", "要不", "要不然",
"要不是", "要么", "要是", "", "也罢", "也好", "", "一般", "一旦", "一方面", "一来", "一切", "一样", "一则", "", "依照",
"", "", "以便", "以及", "以免", "以至", "以至于", "以致", "抑或", "", "因此", "因而", "因为", "", "", "",
"由此可见", "由于", "", "有的", "有关", "有些", "", "", "于是", "于是乎", "", "与此同时", "与否", "与其", "越是",
"云云", "", "再说", "再者", "", "在下", "", "咱们", "", "", "怎么", "怎么办", "怎么样", "怎样", "", "", "照着",
"", "", "这边", "这儿", "这个", "这会儿", "这就是说", "这里", "这么", "这么点儿", "这么些", "这么样", "这时", "这些", "这样",
"正如", "", "", "之类", "之所以", "之一", "只是", "只限", "只要", "只有", "", "至于", "诸位", "", "着呢", "", "自从",
"自个儿", "自各儿", "自己", "自家", "自身", "综上所述", "总的来看", "总的来说", "总的说来", "总而言之", "总之", "", "纵令",
"纵然", "纵使", "遵照", "作为", "", "", "", "", "", "", "", "喔唷", "", "", "", "~", "!", ".", ":",
"\"", "'", "(", ")", "*", "A", "", "社会主义", "--", "..", ">>", " [", " ]", "", "<", ">", "/", "\\", "|", "-", "_",
"+", "=", "&", "^", "%", "#", "@", "`", ";", "$", "", "", "——", "", "", "·", "...", "", "", "", "", "",
" ", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "︿", "", "", "", "", "", "",
"", "", "", "啊哈", "啊呀", "啊哟", "挨次", "挨个", "挨家挨户", "挨门挨户", "挨门逐户", "挨着", "按理", "按期", "按时",
"按说", "暗地里", "暗中", "暗自", "昂然", "八成", "白白", "", "", "保管", "保险", "", "背地里", "背靠背", "倍感", "倍加",
"本人", "本身", "", "比起", "比如说", "比照", "毕竟", "", "必定", "必将", "必须", "便", "别人", "并非", "并肩", "并没",
"并没有", "并排", "并无", "勃然", "", "不必", "不常", "不大", "不但...而且", "不得", "不得不", "不得了", "不得已", "不迭",
"不定", "不对", "不妨", "不管怎样", "不会", "不仅...而且", "不仅仅", "不仅仅是", "不经意", "不可开交", "不可抗拒", "不力", "不了",
"不料", "不满", "不免", "不能不", "不起", "不巧", "不然的话", "不日", "不少", "不胜", "不时", "不是", "不同", "不能", "不要",
"不外", "不外乎", "不下", "不限", "不消", "不已", "不亦乐乎", "不由得", "不再", "不择手段", "不怎么", "不曾", "不知不觉", "不止",
"不止一次", "不至于", "", "才能", "策略地", "差不多", "差一点", "", "常常", "常言道", "常言说", "常言说得好", "长此下去",
"长话短说", "长期以来", "长线", "敞开儿", "彻夜", "陈年", "趁便", "趁机", "趁热", "趁势", "趁早", "成年", "成年累月", "成心",
"乘机", "乘胜", "乘势", "乘隙", "乘虚", "诚然", "迟早", "充分", "充其极", "充其量", "抽冷子", "", "", "", "出来", "出去",
"除此", "除此而外", "除此以外", "除开", "除去", "除却", "除外", "处处", "川流不息", "", "传说", "传闻", "串行", "", "纯粹",
"此后", "此中", "次第", "匆匆", "从不", "从此", "从此以后", "从古到今", "从古至今", "从今以后", "从宽", "从来", "从轻", "从速",
"从头", "从未", "从无到有", "从小", "从新", "从严", "从优", "从早到晚", "从中", "从重", "凑巧", "", "存心", "达旦", "打从",
"打开天窗说亮话", "", "大不了", "大大", "大抵", "大都", "大多", "大凡", "大概", "大家", "大举", "大略", "大面儿上", "大事",
"大体", "大体上", "大约", "大张旗鼓", "大致", "呆呆地", "", "", "待到", "", "单纯", "单单", "但愿", "弹指之间", "当场",
"当儿", "当即", "当口儿", "当然", "当庭", "当头", "当下", "当真", "当中", "倒不如", "倒不如说", "倒是", "到处", "到底", "到了儿",
"到目前为止", "到头", "到头来", "得起", "得天独厚", "的确", "等到", "叮当", "顶多", "", "动不动", "动辄", "陡然", "", "",
"独自", "断然", "顿时", "多次", "多多", "多多少少", "多多益善", "多亏", "多年来", "多年前", "而后", "而论", "而又", "尔等",
"二话不说", "二话没说", "反倒", "反倒是", "反而", "反手", "反之亦然", "反之则", "", "方才", "方能", "放量", "非常", "非得",
"分期", "分期分批", "分头", "奋勇", "愤然", "风雨无阻", "", "", "", "嘎嘎", "该当", "", "赶快", "赶早不赶晚", "",
"敢情", "敢于", "", "刚才", "刚好", "刚巧", "高低", "格外", "隔日", "隔夜", "个人", "各式", "", "更加", "更进一步", "更为",
"公然", "", "共总", "够瞧的", "姑且", "古来", "故而", "故意", "", "", "怪不得", "惯常", "", "光是", "归根到底",
"归根结底", "过于", "毫不", "毫无", "毫无保留地", "毫无例外", "好在", "何必", "何尝", "何妨", "何苦", "何乐而不为", "何须",
"何止", "", "很多", "很少", "轰然", "后来", "呼啦", "忽地", "忽然", "", "互相", "哗啦", "话说", "", "恍然", "", "豁然",
"", "伙同", "或多或少", "或许", "基本", "基本上", "基于", "", "极大", "极度", "极端", "极力", "极其", "极为", "急匆匆",
"即将", "即刻", "即是说", "几度", "几番", "几乎", "几经", "既...又", "继之", "加上", "加以", "间或", "简而言之", "简言之",
"简直", "", "将才", "将近", "将要", "交口", "较比", "较为", "接连不断", "接下来", "皆可", "截然", "截至", "藉以", "借此",
"借以", "届时", "", "仅仅", "", "进来", "进去", "", "近几年来", "近来", "近年来", "尽管如此", "尽可能", "尽快", "尽量",
"尽然", "尽如人意", "尽心竭力", "尽心尽力", "尽早", "精光", "经常", "", "竟然", "究竟", "就此", "就地", "就算", "居然", "局外",
"举凡", "据称", "据此", "据实", "据说", "据我所知", "据悉", "具体来说", "决不", "决非", "", "绝不", "绝顶", "绝对", "绝非",
"", "", "", "看来", "看起来", "看上去", "看样子", "可好", "可能", "恐怕", "", "快要", "来不及", "来得及", "来讲",
"来看", "拦腰", "牢牢", "", "老大", "老老实实", "老是", "累次", "累年", "理当", "理该", "理应", "", "", "立地", "立刻",
"立马", "立时", "联袂", "连连", "连日", "连日来", "连声", "连袂", "临到", "另方面", "另行", "另一个", "路经", "", "屡次",
"屡次三番", "屡屡", "缕缕", "率尔", "率然", "", "略加", "略微", "略为", "论说", "马上", "", "", "", "没有", "每逢",
"每每", "每时每刻", "猛然", "猛然间", "", "莫不", "莫非", "莫如", "默默地", "默然", "", "那末", "", "难道", "难得", "难怪",
"难说", "", "年复一年", "凝神", "偶而", "偶尔", "", "", "碰巧", "譬如", "偏偏", "", "平素", "", "迫于", "扑通",
"其后", "其实", "", "", "起初", "起来", "起首", "起头", "起先", "", "岂非", "岂止", "", "恰逢", "恰好", "恰恰", "恰巧",
"恰如", "恰似", "", "千万", "千万千万", "", "切不可", "切莫", "切切", "切勿", "", "亲口", "亲身", "亲手", "亲眼", "亲自",
"", "顷刻", "顷刻间", "顷刻之间", "请勿", "穷年累月", "取道", "", "权时", "全都", "全力", "全年", "全然", "全身心", "",
"人人", "", "仍旧", "仍然", "日复一日", "日见", "日渐", "日益", "日臻", "如常", "如此等等", "如次", "如今", "如期", "如前所述",
"如上", "如下", "", "三番两次", "三番五次", "三天两头", "瑟瑟", "沙沙", "", "上来", "上去", "一个", "", "", "\n"
}

View File

@@ -0,0 +1,54 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
from core.rag.models.document import Document
from models.dataset import Dataset
class BaseKeyword(ABC):
    """Abstract interface for keyword-table based indexes over a dataset."""

    def __init__(self, dataset: Dataset):
        self.dataset = dataset

    @abstractmethod
    def create(self, texts: list[Document], **kwargs) -> BaseKeyword:
        raise NotImplementedError

    @abstractmethod
    def add_texts(self, texts: list[Document], **kwargs):
        raise NotImplementedError

    @abstractmethod
    def text_exists(self, id: str) -> bool:
        raise NotImplementedError

    @abstractmethod
    def delete_by_ids(self, ids: list[str]) -> None:
        raise NotImplementedError

    @abstractmethod
    def delete_by_document_id(self, document_id: str) -> None:
        raise NotImplementedError

    def delete(self) -> None:
        raise NotImplementedError

    def search(
            self, query: str,
            **kwargs: Any
    ) -> list[Document]:
        raise NotImplementedError

    def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
        """Remove (in place) documents whose doc_id is already indexed.

        Fix: the original removed items from `texts` while iterating the
        same list, which skips the element following each removal — adjacent
        duplicates survived the filter. Iterating a snapshot avoids that
        while preserving the in-place mutation of `texts`.
        """
        for text in list(texts):
            doc_id = text.metadata['doc_id']
            if self.text_exists(doc_id):
                texts.remove(text)
        return texts

    def _get_uuids(self, texts: list[Document]) -> list[str]:
        """Return the doc_id of every document, in order."""
        return [text.metadata['doc_id'] for text in texts]

View File

@@ -0,0 +1,60 @@
from typing import Any, cast
from flask import current_app
from core.rag.datasource.keyword.jieba.jieba import Jieba
from core.rag.datasource.keyword.keyword_base import BaseKeyword
from core.rag.models.document import Document
from models.dataset import Dataset
class Keyword:
    """Facade that dispatches keyword-index operations to the configured backend."""

    def __init__(self, dataset: Dataset):
        self._dataset = dataset
        self._keyword_processor = self._init_keyword()

    def _init_keyword(self) -> BaseKeyword:
        """Instantiate the backend named by the KEYWORD_STORE config key.

        Raises ValueError when the key is missing or names an unknown store.
        """
        config = cast(dict, current_app.config)
        keyword_type = config.get('KEYWORD_STORE')

        if not keyword_type:
            raise ValueError("Keyword store must be specified.")

        if keyword_type == "jieba":
            return Jieba(
                dataset=self._dataset
            )
        else:
            # Fix: report the keyword store that was actually configured —
            # the original message referenced the unrelated VECTOR_STORE key.
            raise ValueError(f"Keyword store {keyword_type} is not supported.")

    def create(self, texts: list[Document], **kwargs):
        self._keyword_processor.create(texts, **kwargs)

    def add_texts(self, texts: list[Document], **kwargs):
        self._keyword_processor.add_texts(texts, **kwargs)

    def text_exists(self, id: str) -> bool:
        return self._keyword_processor.text_exists(id)

    def delete_by_ids(self, ids: list[str]) -> None:
        self._keyword_processor.delete_by_ids(ids)

    def delete_by_document_id(self, document_id: str) -> None:
        self._keyword_processor.delete_by_document_id(document_id)

    def delete(self) -> None:
        self._keyword_processor.delete()

    def search(
            self, query: str,
            **kwargs: Any
    ) -> list[Document]:
        return self._keyword_processor.search(query, **kwargs)

    def __getattr__(self, name):
        """Forward unknown attribute lookups to the backend's callables."""
        # Fix: look up _keyword_processor via __dict__ — `self._keyword_processor`
        # would re-enter __getattr__ and recurse infinitely if this fires
        # before __init__ has assigned the attribute.
        processor = self.__dict__.get('_keyword_processor')
        if processor is not None:
            method = getattr(processor, name)
            if callable(method):
                return method
        raise AttributeError(f"'Keyword' object has no attribute '{name}'")