# hit_testing_service.py
  1. import logging
  2. import time
  3. from typing import List
  4. import numpy as np
  5. from flask import current_app
  6. from langchain.embeddings import OpenAIEmbeddings
  7. from langchain.embeddings.base import Embeddings
  8. from langchain.schema import Document
  9. from sklearn.manifold import TSNE
  10. from core.embedding.cached_embedding import CacheEmbedding
  11. from core.index.vector_index.vector_index import VectorIndex
  12. from core.llm.llm_builder import LLMBuilder
  13. from extensions.ext_database import db
  14. from models.account import Account
  15. from models.dataset import Dataset, DocumentSegment, DatasetQuery
  16. class HitTestingService:
  17. @classmethod
  18. def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 10) -> dict:
  19. if dataset.available_document_count == 0 or dataset.available_document_count == 0:
  20. return {
  21. "query": {
  22. "content": query,
  23. "tsne_position": {'x': 0, 'y': 0},
  24. },
  25. "records": []
  26. }
  27. model_credentials = LLMBuilder.get_model_credentials(
  28. tenant_id=dataset.tenant_id,
  29. model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
  30. model_name='text-embedding-ada-002'
  31. )
  32. embeddings = CacheEmbedding(OpenAIEmbeddings(
  33. **model_credentials
  34. ))
  35. vector_index = VectorIndex(
  36. dataset=dataset,
  37. config=current_app.config,
  38. embeddings=embeddings
  39. )
  40. start = time.perf_counter()
  41. documents = vector_index.search(
  42. query,
  43. search_type='similarity_score_threshold',
  44. search_kwargs={
  45. 'k': 10
  46. }
  47. )
  48. end = time.perf_counter()
  49. logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
  50. dataset_query = DatasetQuery(
  51. dataset_id=dataset.id,
  52. content=query,
  53. source='hit_testing',
  54. created_by_role='account',
  55. created_by=account.id
  56. )
  57. db.session.add(dataset_query)
  58. db.session.commit()
  59. return cls.compact_retrieve_response(dataset, embeddings, query, documents)
  60. @classmethod
  61. def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, query: str, documents: List[Document]):
  62. text_embeddings = [
  63. embeddings.embed_query(query)
  64. ]
  65. text_embeddings.extend(embeddings.embed_documents([document.page_content for document in documents]))
  66. tsne_position_data = cls.get_tsne_positions_from_embeddings(text_embeddings)
  67. query_position = tsne_position_data.pop(0)
  68. i = 0
  69. records = []
  70. for document in documents:
  71. index_node_id = document.metadata['doc_id']
  72. segment = db.session.query(DocumentSegment).filter(
  73. DocumentSegment.dataset_id == dataset.id,
  74. DocumentSegment.enabled == True,
  75. DocumentSegment.status == 'completed',
  76. DocumentSegment.index_node_id == index_node_id
  77. ).first()
  78. if not segment:
  79. i += 1
  80. continue
  81. record = {
  82. "segment": segment,
  83. "score": document.metadata['score'],
  84. "tsne_position": tsne_position_data[i]
  85. }
  86. records.append(record)
  87. i += 1
  88. return {
  89. "query": {
  90. "content": query,
  91. "tsne_position": query_position,
  92. },
  93. "records": records
  94. }
  95. @classmethod
  96. def get_tsne_positions_from_embeddings(cls, embeddings: list):
  97. embedding_length = len(embeddings)
  98. if embedding_length <= 1:
  99. return [{'x': 0, 'y': 0}]
  100. concatenate_data = np.array(embeddings).reshape(embedding_length, -1)
  101. # concatenate_data = np.concatenate(embeddings)
  102. perplexity = embedding_length / 2 + 1
  103. if perplexity >= embedding_length:
  104. perplexity = max(embedding_length - 1, 1)
  105. tsne = TSNE(n_components=2, perplexity=perplexity, early_exaggeration=12.0)
  106. data_tsne = tsne.fit_transform(concatenate_data)
  107. tsne_position_data = []
  108. for i in range(len(data_tsne)):
  109. tsne_position_data.append({'x': float(data_tsne[i][0]), 'y': float(data_tsne[i][1])})
  110. return tsne_position_data