dataset_multi_retriever_tool.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249
  1. import json
  2. import threading
  3. from typing import List, Optional, Type
  4. from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
  5. from core.embedding.cached_embedding import CacheEmbedding
  6. from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
  7. from core.index.keyword_table_index.keyword_table_index import KeywordTableConfig, KeywordTableIndex
  8. from core.model_manager import ModelManager
  9. from core.model_runtime.entities.model_entities import ModelType
  10. from core.rerank.rerank import RerankRunner
  11. from extensions.ext_database import db
  12. from flask import Flask, current_app
  13. from langchain.tools import BaseTool
  14. from models.dataset import Dataset, Document, DocumentSegment
  15. from pydantic import BaseModel, Field
  16. from services.retrieval_service import RetrievalService
  17. default_retrieval_model = {
  18. 'search_method': 'semantic_search',
  19. 'reranking_enable': False,
  20. 'reranking_model': {
  21. 'reranking_provider_name': '',
  22. 'reranking_model_name': ''
  23. },
  24. 'top_k': 2,
  25. 'score_threshold_enabled': False
  26. }
class DatasetMultiRetrieverToolInput(BaseModel):
    """Argument schema for DatasetMultiRetrieverTool: a single free-text query."""

    # The user query to search the datasets with (required).
    query: str = Field(..., description="dataset multi retriever and rerank")
class DatasetMultiRetrieverTool(BaseTool):
    """Tool for querying multi dataset.

    Fans a single query out to every dataset in ``dataset_ids`` (one worker
    thread per dataset), merges all hits, reranks the merged list with the
    configured rerank model, then returns the matching segments' text joined
    by newlines. Optionally reports per-segment citation metadata back
    through ``hit_callbacks``.
    """

    # LangChain tool identity; from_dataset() replaces name with a tenant-derived one.
    name: str = "dataset-"
    args_schema: Type[BaseModel] = DatasetMultiRetrieverToolInput
    description: str = "dataset multi retriever and rerank. "
    # Tenant that owns the datasets; also used to resolve the rerank model instance.
    tenant_id: str
    # Datasets to search; _run() spawns one retrieval thread per id.
    dataset_ids: List[str]
    # Number of documents kept after reranking (mirrors default_retrieval_model's top_k).
    top_k: int = 2
    # Minimum rerank score passed to RerankRunner.run; None disables filtering.
    score_threshold: Optional[float] = None
    reranking_provider_name: str
    reranking_model_name: str
    # When True, _run() also pushes citation payloads to hit_callbacks.
    return_resource: bool
    # Caller origin tag (e.g. 'dev') copied into citation payloads.
    retriever_from: str
    # Mutable default is acceptable on a pydantic model: field defaults are
    # copied per instance, not shared — TODO confirm against pydantic version in use.
    hit_callbacks: List[DatasetIndexToolCallbackHandler] = []

    @classmethod
    def from_dataset(cls, dataset_ids: List[str], tenant_id: str, **kwargs):
        """Alternate constructor deriving the tool name from the tenant id.

        Extra keyword arguments are forwarded verbatim to the pydantic
        constructor (reranking settings, callbacks, etc.).
        """
        return cls(
            name=f'dataset-{tenant_id}',
            tenant_id=tenant_id,
            dataset_ids=dataset_ids,
            **kwargs
        )

    def _run(self, query: str) -> str:
        """Retrieve from every dataset, rerank, and return joined segment text.

        Side effects: calls on_tool_end on each hit callback with the reranked
        documents, and return_retriever_resource_info when return_resource is set.
        """
        # Fan out: one thread per dataset. Each worker appends hits into the
        # shared all_documents list (list.append is atomic under the GIL).
        threads = []
        all_documents = []
        for dataset_id in self.dataset_ids:
            retrieval_thread = threading.Thread(target=self._retriever, kwargs={
                # Pass the concrete app object so workers can build their own app context.
                'flask_app': current_app._get_current_object(),
                'dataset_id': dataset_id,
                'query': query,
                'all_documents': all_documents,
                'hit_callbacks': self.hit_callbacks
            })
            threads.append(retrieval_thread)
            retrieval_thread.start()
        for thread in threads:
            thread.join()
        # do rerank for searched documents
        model_manager = ModelManager()
        rerank_model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id,
            provider=self.reranking_provider_name,
            model_type=ModelType.RERANK,
            model=self.reranking_model_name
        )

        # Rerank the merged results from all datasets and keep the global top_k.
        rerank_runner = RerankRunner(rerank_model_instance)
        all_documents = rerank_runner.run(query, all_documents, self.score_threshold, self.top_k)

        for hit_callback in self.hit_callbacks:
            hit_callback.on_tool_end(all_documents)

        # Collect rerank scores keyed by the document's doc_id metadata.
        # NOTE(review): looked up below via segment.index_node_id — assumes
        # doc_id and index_node_id coincide; confirm. Falsy (e.g. zero) scores
        # are deliberately skipped by the truthiness check.
        document_score_list = {}
        for item in all_documents:
            if 'score' in item.metadata and item.metadata['score']:
                document_score_list[item.metadata['doc_id']] = item.metadata['score']

        # Map the reranked documents back to their stored segments.
        document_context_list = []
        index_node_ids = [document.metadata['doc_id'] for document in all_documents]
        segments = DocumentSegment.query.filter(
            DocumentSegment.dataset_id.in_(self.dataset_ids),
            DocumentSegment.completed_at.isnot(None),
            DocumentSegment.status == 'completed',
            DocumentSegment.enabled == True,
            DocumentSegment.index_node_id.in_(index_node_ids)
        ).all()

        if segments:
            # Preserve rerank order: sort segments by their document's position
            # in index_node_ids; unknown ids sort last.
            index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
            sorted_segments = sorted(segments,
                                     key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
                                                                                       float('inf')))
            for segment in sorted_segments:
                # Q&A segments render both sides; plain segments render content only.
                if segment.answer:
                    document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
                else:
                    document_context_list.append(segment.content)

            if self.return_resource:
                # Build one citation payload per segment whose dataset and
                # document rows are still present, enabled, and not archived.
                context_list = []
                resource_number = 1
                for segment in sorted_segments:
                    dataset = Dataset.query.filter_by(
                        id=segment.dataset_id
                    ).first()
                    document = Document.query.filter(Document.id == segment.document_id,
                                                     Document.enabled == True,
                                                     Document.archived == False,
                                                     ).first()
                    if dataset and document:
                        source = {
                            'position': resource_number,
                            'dataset_id': dataset.id,
                            'dataset_name': dataset.name,
                            'document_id': document.id,
                            'document_name': document.name,
                            'data_source_type': document.data_source_type,
                            'segment_id': segment.id,
                            'retriever_from': self.retriever_from,
                            'score': document_score_list.get(segment.index_node_id, None)
                        }
                        # Dev callers get extra debugging fields.
                        if self.retriever_from == 'dev':
                            source['hit_count'] = segment.hit_count
                            source['word_count'] = segment.word_count
                            source['segment_position'] = segment.position
                            source['index_node_hash'] = segment.index_node_hash
                        if segment.answer:
                            source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
                        else:
                            source['content'] = segment.content
                        context_list.append(source)
                        resource_number += 1

                for hit_callback in self.hit_callbacks:
                    hit_callback.return_retriever_resource_info(context_list)

        # Empty string when nothing matched (document_context_list stays empty).
        return str("\n".join(document_context_list))

    async def _arun(self, tool_input: str) -> str:
        """Async execution is not supported for this tool."""
        raise NotImplementedError()

    def _retriever(self, flask_app: Flask, dataset_id: str, query: str, all_documents: List,
                   hit_callbacks: List[DatasetIndexToolCallbackHandler]):
        """Thread worker: search one dataset and extend all_documents with hits.

        Runs inside its own Flask app context so DB access works off the
        request thread. Missing datasets and embedding-provider errors are
        silently skipped (the dataset simply contributes no results).
        """
        with flask_app.app_context():
            # Scope the lookup by tenant so a foreign dataset id is ignored.
            dataset = db.session.query(Dataset).filter(
                Dataset.tenant_id == self.tenant_id,
                Dataset.id == dataset_id
            ).first()

            if not dataset:
                return []

            for hit_callback in hit_callbacks:
                hit_callback.on_query(query, dataset.id)

            # Use the dataset's configured retrieval model; fall back to the
            # module-level default when none is set.
            retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model

            if dataset.indexing_technique == "economy":
                # use keyword table query
                kw_table_index = KeywordTableIndex(
                    dataset=dataset,
                    config=KeywordTableConfig(
                        max_keywords_per_chunk=5
                    )
                )

                documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
                if documents:
                    all_documents.extend(documents)
            else:
                try:
                    model_manager = ModelManager()
                    embedding_model = model_manager.get_model_instance(
                        tenant_id=dataset.tenant_id,
                        provider=dataset.embedding_model_provider,
                        model_type=ModelType.TEXT_EMBEDDING,
                        model=dataset.embedding_model
                    )
                except LLMBadRequestError:
                    # Best-effort: provider misconfiguration drops this dataset.
                    return []
                except ProviderTokenNotInitError:
                    return []

                embeddings = CacheEmbedding(embedding_model)

                documents = []
                threads = []
                if self.top_k > 0:
                    # retrieval_model source with semantic
                    if retrieval_model['search_method'] == 'semantic_search' or retrieval_model[
                            'search_method'] == 'hybrid_search':
                        embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                            'flask_app': current_app._get_current_object(),
                            'dataset_id': str(dataset.id),
                            'query': query,
                            'top_k': self.top_k,
                            'score_threshold': self.score_threshold,
                            'reranking_model': None,
                            'all_documents': documents,
                            # NOTE(review): literal 'hybrid_search' is passed regardless
                            # of the configured search_method — confirm intentional.
                            'search_method': 'hybrid_search',
                            'embeddings': embeddings
                        })
                        threads.append(embedding_thread)
                        embedding_thread.start()

                    # retrieval_model source with full text
                    if retrieval_model['search_method'] == 'full_text_search' or retrieval_model[
                            'search_method'] == 'hybrid_search':
                        full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search,
                                                                  kwargs={
                                                                      'flask_app': current_app._get_current_object(),
                                                                      'dataset_id': str(dataset.id),
                                                                      'query': query,
                                                                      # NOTE(review): hardcoded 'hybrid_search' here
                                                                      # as well — confirm intentional.
                                                                      'search_method': 'hybrid_search',
                                                                      'embeddings': embeddings,
                                                                      # Threshold only applies when explicitly enabled
                                                                      # in the retrieval model config.
                                                                      'score_threshold': retrieval_model[
                                                                          'score_threshold'] if retrieval_model[
                                                                          'score_threshold_enabled'] else None,
                                                                      'top_k': self.top_k,
                                                                      'reranking_model': retrieval_model[
                                                                          'reranking_model'] if retrieval_model[
                                                                          'reranking_enable'] else None,
                                                                      'all_documents': documents
                                                                  })
                        threads.append(full_text_index_thread)
                        full_text_index_thread.start()

                    for thread in threads:
                        thread.join()

                all_documents.extend(documents)