qdrant.py

  1. """Wrapper around Qdrant vector database."""
  2. from __future__ import annotations
  3. import asyncio
  4. import functools
  5. import uuid
  6. import warnings
  7. from itertools import islice
  8. from operator import itemgetter
  9. from typing import (
  10. TYPE_CHECKING,
  11. Any,
  12. Callable,
  13. Dict,
  14. Generator,
  15. Iterable,
  16. List,
  17. Optional,
  18. Sequence,
  19. Tuple,
  20. Type,
  21. Union,
  22. )
  23. import numpy as np
  24. from langchain.docstore.document import Document
  25. from langchain.embeddings.base import Embeddings
  26. from langchain.vectorstores import VectorStore
  27. from langchain.vectorstores.utils import maximal_marginal_relevance
  28. if TYPE_CHECKING:
  29. from qdrant_client import grpc # noqa
  30. from qdrant_client.conversions import common_types
  31. from qdrant_client.http import models as rest
  32. DictFilter = Dict[str, Union[str, int, bool, dict, list]]
  33. MetadataFilter = Union[DictFilter, common_types.Filter]
  34. class QdrantException(Exception):
35. """Base class for all Qdrant-related exceptions."""
  36. def sync_call_fallback(method: Callable) -> Callable:
  37. """
  38. Decorator to call the synchronous method of the class if the async method is not
39. implemented. This decorator should only be used for methods that are defined
  40. as async in the class.
  41. """
  42. @functools.wraps(method)
  43. async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
  44. try:
  45. return await method(self, *args, **kwargs)
  46. except NotImplementedError:
  47. # If the async method is not implemented, call the synchronous method
  48. # by removing the first letter from the method name. For example,
49. # if the async method is called ``aadd_texts``, the synchronous method
50. # will be called ``add_texts``.
  51. sync_method = functools.partial(
  52. getattr(self, method.__name__[1:]), *args, **kwargs
  53. )
  54. return await asyncio.get_event_loop().run_in_executor(None, sync_method)
  55. return wrapper
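# --- Added illustration (not in the original module) ------------------------
# A minimal sketch of how ``sync_call_fallback`` behaves: an async method that
# raises NotImplementedError falls back to its synchronous counterpart (the
# name minus the leading "a"), executed in the default thread-pool executor.
# The ``Example`` class and values below are hypothetical.
#
#     class Example:
#         def add_texts(self, texts):            # sync implementation
#             return list(texts)
#
#         @sync_call_fallback
#         async def aadd_texts(self, texts):     # async stub
#             raise NotImplementedError
#
#     # asyncio.run(Example().aadd_texts(["a", "b"]))  # -> ["a", "b"]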
  56. class Qdrant(VectorStore):
  57. """Wrapper around Qdrant vector database.
  58. To use you should have the ``qdrant-client`` package installed.
  59. Example:
  60. .. code-block:: python
  61. from qdrant_client import QdrantClient
  62. from langchain import Qdrant
  63. client = QdrantClient()
  64. collection_name = "MyCollection"
65. qdrant = Qdrant(client, collection_name, embeddings=embeddings)
  66. """
  67. CONTENT_KEY = "page_content"
  68. METADATA_KEY = "metadata"
  69. VECTOR_NAME = None
  70. def __init__(
  71. self,
  72. client: Any,
  73. collection_name: str,
  74. embeddings: Optional[Embeddings] = None,
  75. content_payload_key: str = CONTENT_KEY,
  76. metadata_payload_key: str = METADATA_KEY,
  77. distance_strategy: str = "COSINE",
  78. vector_name: Optional[str] = VECTOR_NAME,
  79. embedding_function: Optional[Callable] = None, # deprecated
  80. ):
  81. """Initialize with necessary components."""
  82. try:
  83. import qdrant_client
  84. except ImportError:
  85. raise ValueError(
  86. "Could not import qdrant-client python package. "
  87. "Please install it with `pip install qdrant-client`."
  88. )
  89. if not isinstance(client, qdrant_client.QdrantClient):
  90. raise ValueError(
  91. f"client should be an instance of qdrant_client.QdrantClient, "
  92. f"got {type(client)}"
  93. )
  94. if embeddings is None and embedding_function is None:
  95. raise ValueError(
  96. "`embeddings` value can't be None. Pass `Embeddings` instance."
  97. )
  98. if embeddings is not None and embedding_function is not None:
  99. raise ValueError(
  100. "Both `embeddings` and `embedding_function` are passed. "
  101. "Use `embeddings` only."
  102. )
  103. self._embeddings = embeddings
  104. self._embeddings_function = embedding_function
  105. self.client: qdrant_client.QdrantClient = client
  106. self.collection_name = collection_name
  107. self.content_payload_key = content_payload_key or self.CONTENT_KEY
  108. self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
  109. self.vector_name = vector_name or self.VECTOR_NAME
  110. if embedding_function is not None:
  111. warnings.warn(
  112. "Using `embedding_function` is deprecated. "
  113. "Pass `Embeddings` instance to `embeddings` instead."
  114. )
  115. if not isinstance(embeddings, Embeddings):
  116. warnings.warn(
117. "`embeddings` should be an instance of `Embeddings`. "
118. "Using `embeddings` as `embedding_function`, which is deprecated."
  119. )
  120. self._embeddings_function = embeddings
  121. self._embeddings = None
  122. self.distance_strategy = distance_strategy.upper()
  123. @property
  124. def embeddings(self) -> Optional[Embeddings]:
  125. return self._embeddings
  126. def add_texts(
  127. self,
  128. texts: Iterable[str],
  129. metadatas: Optional[List[dict]] = None,
  130. ids: Optional[Sequence[str]] = None,
  131. batch_size: int = 64,
  132. **kwargs: Any,
  133. ) -> List[str]:
  134. """Run more texts through the embeddings and add to the vectorstore.
  135. Args:
  136. texts: Iterable of strings to add to the vectorstore.
  137. metadatas: Optional list of metadatas associated with the texts.
  138. ids:
  139. Optional list of ids to associate with the texts. Ids have to be
  140. uuid-like strings.
  141. batch_size:
142. How many vectors to upload per request.
  143. Default: 64
  144. Returns:
  145. List of ids from adding the texts into the vectorstore.
  146. """
  147. added_ids = []
  148. for batch_ids, points in self._generate_rest_batches(
  149. texts, metadatas, ids, batch_size
  150. ):
  151. self.client.upsert(
  152. collection_name=self.collection_name, points=points, **kwargs
  153. )
  154. added_ids.extend(batch_ids)
  155. return added_ids
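# --- Added usage sketch (assumption: ``qdrant`` is an initialized Qdrant
# wrapper; the texts, metadata and ids below are placeholders) ---------------
#
#     texts = ["foo", "bar"]
#     metadatas = [{"source": "a.txt"}, {"source": "b.txt"}]
#     ids = [str(uuid.uuid4()) for _ in texts]
#     returned_ids = qdrant.add_texts(texts, metadatas=metadatas, ids=ids)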
  156. @sync_call_fallback
  157. async def aadd_texts(
  158. self,
  159. texts: Iterable[str],
  160. metadatas: Optional[List[dict]] = None,
  161. ids: Optional[Sequence[str]] = None,
  162. batch_size: int = 64,
  163. **kwargs: Any,
  164. ) -> List[str]:
  165. """Run more texts through the embeddings and add to the vectorstore.
  166. Args:
  167. texts: Iterable of strings to add to the vectorstore.
  168. metadatas: Optional list of metadatas associated with the texts.
  169. ids:
  170. Optional list of ids to associate with the texts. Ids have to be
  171. uuid-like strings.
  172. batch_size:
173. How many vectors to upload per request.
  174. Default: 64
  175. Returns:
  176. List of ids from adding the texts into the vectorstore.
  177. """
  178. from qdrant_client import grpc # noqa
  179. from qdrant_client.conversions.conversion import RestToGrpc
  180. added_ids = []
  181. for batch_ids, points in self._generate_rest_batches(
  182. texts, metadatas, ids, batch_size
  183. ):
  184. await self.client.async_grpc_points.Upsert(
  185. grpc.UpsertPoints(
  186. collection_name=self.collection_name,
  187. points=[RestToGrpc.convert_point_struct(point) for point in points],
  188. )
  189. )
  190. added_ids.extend(batch_ids)
  191. return added_ids
  192. def similarity_search(
  193. self,
  194. query: str,
  195. k: int = 4,
  196. filter: Optional[MetadataFilter] = None,
  197. search_params: Optional[common_types.SearchParams] = None,
  198. offset: int = 0,
  199. score_threshold: Optional[float] = None,
  200. consistency: Optional[common_types.ReadConsistency] = None,
  201. **kwargs: Any,
202. ) -> List[Document]:
  203. """Return docs most similar to query.
  204. Args:
  205. query: Text to look up documents similar to.
  206. k: Number of Documents to return. Defaults to 4.
  207. filter: Filter by metadata. Defaults to None.
  208. search_params: Additional search params
  209. offset:
  210. Offset of the first result to return.
  211. May be used to paginate results.
  212. Note: large offset values may cause performance issues.
  213. score_threshold:
  214. Define a minimal score threshold for the result.
  215. If defined, less similar results will not be returned.
216. Score of the returned result might be higher or lower than the
  217. threshold depending on the Distance function used.
  218. E.g. for cosine similarity only higher scores will be returned.
  219. consistency:
  220. Read consistency of the search. Defines how many replicas should be
  221. queried before returning the result.
  222. Values:
223. - int - number of replicas to query, values should be present in all
  224. queried replicas
  225. - 'majority' - query all replicas, but return values present in the
  226. majority of replicas
  227. - 'quorum' - query the majority of replicas, return values present in
  228. all of them
  229. - 'all' - query all replicas, and return values present in all replicas
  230. Returns:
  231. List of Documents most similar to the query.
  232. """
  233. results = self.similarity_search_with_score(
  234. query,
  235. k,
  236. filter=filter,
  237. search_params=search_params,
  238. offset=offset,
  239. score_threshold=score_threshold,
  240. consistency=consistency,
  241. **kwargs,
  242. )
  243. return list(map(itemgetter(0), results))
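# --- Added usage sketch: metadata-filtered search with qdrant-client filter
# models (the recommended replacement for the deprecated dict filter). The
# ``metadata.source`` key assumes the default METADATA_KEY payload layout. ---
#
#     from qdrant_client.http import models as rest
#
#     flt = rest.Filter(
#         must=[
#             rest.FieldCondition(
#                 key="metadata.source",
#                 match=rest.MatchValue(value="a.txt"),
#             )
#         ]
#     )
#     docs = qdrant.similarity_search("query text", k=4, filter=flt)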
  244. @sync_call_fallback
  245. async def asimilarity_search(
  246. self,
  247. query: str,
  248. k: int = 4,
  249. filter: Optional[MetadataFilter] = None,
  250. **kwargs: Any,
  251. ) -> List[Document]:
  252. """Return docs most similar to query.
  253. Args:
  254. query: Text to look up documents similar to.
  255. k: Number of Documents to return. Defaults to 4.
  256. filter: Filter by metadata. Defaults to None.
  257. Returns:
  258. List of Documents most similar to the query.
  259. """
  260. results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
  261. return list(map(itemgetter(0), results))
  262. def similarity_search_with_score(
  263. self,
  264. query: str,
  265. k: int = 4,
  266. filter: Optional[MetadataFilter] = None,
  267. search_params: Optional[common_types.SearchParams] = None,
  268. offset: int = 0,
  269. score_threshold: Optional[float] = None,
  270. consistency: Optional[common_types.ReadConsistency] = None,
  271. **kwargs: Any,
  272. ) -> List[Tuple[Document, float]]:
  273. """Return docs most similar to query.
  274. Args:
  275. query: Text to look up documents similar to.
  276. k: Number of Documents to return. Defaults to 4.
  277. filter: Filter by metadata. Defaults to None.
  278. search_params: Additional search params
  279. offset:
  280. Offset of the first result to return.
  281. May be used to paginate results.
  282. Note: large offset values may cause performance issues.
  283. score_threshold:
  284. Define a minimal score threshold for the result.
  285. If defined, less similar results will not be returned.
286. Score of the returned result might be higher or lower than the
  287. threshold depending on the Distance function used.
  288. E.g. for cosine similarity only higher scores will be returned.
  289. consistency:
  290. Read consistency of the search. Defines how many replicas should be
  291. queried before returning the result.
  292. Values:
293. - int - number of replicas to query, values should be present in all
  294. queried replicas
  295. - 'majority' - query all replicas, but return values present in the
  296. majority of replicas
  297. - 'quorum' - query the majority of replicas, return values present in
  298. all of them
  299. - 'all' - query all replicas, and return values present in all replicas
  300. Returns:
  301. List of documents most similar to the query text and distance for each.
  302. """
  303. return self.similarity_search_with_score_by_vector(
  304. self._embed_query(query),
  305. k,
  306. filter=filter,
  307. search_params=search_params,
  308. offset=offset,
  309. score_threshold=score_threshold,
  310. consistency=consistency,
  311. **kwargs,
  312. )
  313. @sync_call_fallback
  314. async def asimilarity_search_with_score(
  315. self,
  316. query: str,
  317. k: int = 4,
  318. filter: Optional[MetadataFilter] = None,
  319. search_params: Optional[common_types.SearchParams] = None,
  320. offset: int = 0,
  321. score_threshold: Optional[float] = None,
  322. consistency: Optional[common_types.ReadConsistency] = None,
  323. **kwargs: Any,
  324. ) -> List[Tuple[Document, float]]:
  325. """Return docs most similar to query.
  326. Args:
  327. query: Text to look up documents similar to.
  328. k: Number of Documents to return. Defaults to 4.
  329. filter: Filter by metadata. Defaults to None.
  330. search_params: Additional search params
  331. offset:
  332. Offset of the first result to return.
  333. May be used to paginate results.
  334. Note: large offset values may cause performance issues.
  335. score_threshold:
  336. Define a minimal score threshold for the result.
  337. If defined, less similar results will not be returned.
338. Score of the returned result might be higher or lower than the
  339. threshold depending on the Distance function used.
  340. E.g. for cosine similarity only higher scores will be returned.
  341. consistency:
  342. Read consistency of the search. Defines how many replicas should be
  343. queried before returning the result.
  344. Values:
345. - int - number of replicas to query, values should be present in all
  346. queried replicas
  347. - 'majority' - query all replicas, but return values present in the
  348. majority of replicas
  349. - 'quorum' - query the majority of replicas, return values present in
  350. all of them
  351. - 'all' - query all replicas, and return values present in all replicas
  352. Returns:
  353. List of documents most similar to the query text and distance for each.
  354. """
  355. return await self.asimilarity_search_with_score_by_vector(
  356. self._embed_query(query),
  357. k,
  358. filter=filter,
  359. search_params=search_params,
  360. offset=offset,
  361. score_threshold=score_threshold,
  362. consistency=consistency,
  363. **kwargs,
  364. )
  365. def similarity_search_by_vector(
  366. self,
  367. embedding: List[float],
  368. k: int = 4,
  369. filter: Optional[MetadataFilter] = None,
  370. search_params: Optional[common_types.SearchParams] = None,
  371. offset: int = 0,
  372. score_threshold: Optional[float] = None,
  373. consistency: Optional[common_types.ReadConsistency] = None,
  374. **kwargs: Any,
  375. ) -> List[Document]:
  376. """Return docs most similar to embedding vector.
  377. Args:
  378. embedding: Embedding vector to look up documents similar to.
  379. k: Number of Documents to return. Defaults to 4.
  380. filter: Filter by metadata. Defaults to None.
  381. search_params: Additional search params
  382. offset:
  383. Offset of the first result to return.
  384. May be used to paginate results.
  385. Note: large offset values may cause performance issues.
  386. score_threshold:
  387. Define a minimal score threshold for the result.
  388. If defined, less similar results will not be returned.
389. Score of the returned result might be higher or lower than the
  390. threshold depending on the Distance function used.
  391. E.g. for cosine similarity only higher scores will be returned.
  392. consistency:
  393. Read consistency of the search. Defines how many replicas should be
  394. queried before returning the result.
  395. Values:
396. - int - number of replicas to query, values should be present in all
  397. queried replicas
  398. - 'majority' - query all replicas, but return values present in the
  399. majority of replicas
  400. - 'quorum' - query the majority of replicas, return values present in
  401. all of them
  402. - 'all' - query all replicas, and return values present in all replicas
  403. Returns:
  404. List of Documents most similar to the query.
  405. """
  406. results = self.similarity_search_with_score_by_vector(
  407. embedding,
  408. k,
  409. filter=filter,
  410. search_params=search_params,
  411. offset=offset,
  412. score_threshold=score_threshold,
  413. consistency=consistency,
  414. **kwargs,
  415. )
  416. return list(map(itemgetter(0), results))
  417. @sync_call_fallback
  418. async def asimilarity_search_by_vector(
  419. self,
  420. embedding: List[float],
  421. k: int = 4,
  422. filter: Optional[MetadataFilter] = None,
  423. search_params: Optional[common_types.SearchParams] = None,
  424. offset: int = 0,
  425. score_threshold: Optional[float] = None,
  426. consistency: Optional[common_types.ReadConsistency] = None,
  427. **kwargs: Any,
  428. ) -> List[Document]:
  429. """Return docs most similar to embedding vector.
  430. Args:
  431. embedding: Embedding vector to look up documents similar to.
  432. k: Number of Documents to return. Defaults to 4.
  433. filter: Filter by metadata. Defaults to None.
  434. search_params: Additional search params
  435. offset:
  436. Offset of the first result to return.
  437. May be used to paginate results.
  438. Note: large offset values may cause performance issues.
  439. score_threshold:
  440. Define a minimal score threshold for the result.
  441. If defined, less similar results will not be returned.
442. Score of the returned result might be higher or lower than the
  443. threshold depending on the Distance function used.
  444. E.g. for cosine similarity only higher scores will be returned.
  445. consistency:
  446. Read consistency of the search. Defines how many replicas should be
  447. queried before returning the result.
  448. Values:
449. - int - number of replicas to query, values should be present in all
  450. queried replicas
  451. - 'majority' - query all replicas, but return values present in the
  452. majority of replicas
  453. - 'quorum' - query the majority of replicas, return values present in
  454. all of them
  455. - 'all' - query all replicas, and return values present in all replicas
  456. Returns:
  457. List of Documents most similar to the query.
  458. """
  459. results = await self.asimilarity_search_with_score_by_vector(
  460. embedding,
  461. k,
  462. filter=filter,
  463. search_params=search_params,
  464. offset=offset,
  465. score_threshold=score_threshold,
  466. consistency=consistency,
  467. **kwargs,
  468. )
  469. return list(map(itemgetter(0), results))
  470. def similarity_search_with_score_by_vector(
  471. self,
  472. embedding: List[float],
  473. k: int = 4,
  474. filter: Optional[MetadataFilter] = None,
  475. search_params: Optional[common_types.SearchParams] = None,
  476. offset: int = 0,
  477. score_threshold: Optional[float] = None,
  478. consistency: Optional[common_types.ReadConsistency] = None,
  479. **kwargs: Any,
  480. ) -> List[Tuple[Document, float]]:
  481. """Return docs most similar to embedding vector.
  482. Args:
  483. embedding: Embedding vector to look up documents similar to.
  484. k: Number of Documents to return. Defaults to 4.
  485. filter: Filter by metadata. Defaults to None.
  486. search_params: Additional search params
  487. offset:
  488. Offset of the first result to return.
  489. May be used to paginate results.
  490. Note: large offset values may cause performance issues.
  491. score_threshold:
  492. Define a minimal score threshold for the result.
  493. If defined, less similar results will not be returned.
494. Score of the returned result might be higher or lower than the
  495. threshold depending on the Distance function used.
  496. E.g. for cosine similarity only higher scores will be returned.
  497. consistency:
  498. Read consistency of the search. Defines how many replicas should be
  499. queried before returning the result.
  500. Values:
501. - int - number of replicas to query, values should be present in all
  502. queried replicas
  503. - 'majority' - query all replicas, but return values present in the
  504. majority of replicas
  505. - 'quorum' - query the majority of replicas, return values present in
  506. all of them
  507. - 'all' - query all replicas, and return values present in all replicas
  508. Returns:
  509. List of documents most similar to the query text and distance for each.
  510. """
  511. if filter is not None and isinstance(filter, dict):
  512. warnings.warn(
  513. "Using dict as a `filter` is deprecated. Please use qdrant-client "
  514. "filters directly: "
  515. "https://qdrant.tech/documentation/concepts/filtering/",
  516. DeprecationWarning,
  517. )
  518. qdrant_filter = self._qdrant_filter_from_dict(filter)
  519. else:
  520. qdrant_filter = filter
  521. query_vector = embedding
  522. if self.vector_name is not None:
  523. query_vector = (self.vector_name, embedding) # type: ignore[assignment]
  524. results = self.client.search(
  525. collection_name=self.collection_name,
  526. query_vector=query_vector,
  527. query_filter=qdrant_filter,
  528. search_params=search_params,
  529. limit=k,
  530. offset=offset,
  531. with_payload=True,
532. with_vectors=False, # Langchain does not expect vectors to be returned
  533. score_threshold=score_threshold,
  534. consistency=consistency,
  535. **kwargs,
  536. )
  537. return [
  538. (
  539. self._document_from_scored_point(
  540. result, self.content_payload_key, self.metadata_payload_key
  541. ),
  542. result.score,
  543. )
  544. for result in results
  545. ]
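# --- Added usage sketch: searching by a precomputed vector with a score
# threshold. ``embeddings`` is assumed to be an ``Embeddings`` instance; when
# ``vector_name`` is set, the method above wraps the vector into a
# ``(name, vector)`` tuple automatically. ------------------------------------
#
#     vector = embeddings.embed_query("query text")
#     hits = qdrant.similarity_search_with_score_by_vector(
#         vector, k=4, score_threshold=0.8
#     )
#     for doc, score in hits:
#         print(score, doc.page_content)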
  546. @sync_call_fallback
  547. async def asimilarity_search_with_score_by_vector(
  548. self,
  549. embedding: List[float],
  550. k: int = 4,
  551. filter: Optional[MetadataFilter] = None,
  552. search_params: Optional[common_types.SearchParams] = None,
  553. offset: int = 0,
  554. score_threshold: Optional[float] = None,
  555. consistency: Optional[common_types.ReadConsistency] = None,
  556. **kwargs: Any,
  557. ) -> List[Tuple[Document, float]]:
  558. """Return docs most similar to embedding vector.
  559. Args:
  560. embedding: Embedding vector to look up documents similar to.
  561. k: Number of Documents to return. Defaults to 4.
  562. filter: Filter by metadata. Defaults to None.
  563. search_params: Additional search params
  564. offset:
  565. Offset of the first result to return.
  566. May be used to paginate results.
  567. Note: large offset values may cause performance issues.
  568. score_threshold:
  569. Define a minimal score threshold for the result.
  570. If defined, less similar results will not be returned.
571. Score of the returned result might be higher or lower than the
  572. threshold depending on the Distance function used.
  573. E.g. for cosine similarity only higher scores will be returned.
  574. consistency:
  575. Read consistency of the search. Defines how many replicas should be
  576. queried before returning the result.
  577. Values:
578. - int - number of replicas to query, values should be present in all
  579. queried replicas
  580. - 'majority' - query all replicas, but return values present in the
  581. majority of replicas
  582. - 'quorum' - query the majority of replicas, return values present in
  583. all of them
  584. - 'all' - query all replicas, and return values present in all replicas
  585. Returns:
  586. List of documents most similar to the query text and distance for each.
  587. """
  588. from qdrant_client import grpc # noqa
  589. from qdrant_client.conversions.conversion import RestToGrpc
  590. from qdrant_client.http import models as rest
  591. if filter is not None and isinstance(filter, dict):
  592. warnings.warn(
  593. "Using dict as a `filter` is deprecated. Please use qdrant-client "
  594. "filters directly: "
  595. "https://qdrant.tech/documentation/concepts/filtering/",
  596. DeprecationWarning,
  597. )
  598. qdrant_filter = self._qdrant_filter_from_dict(filter)
  599. else:
  600. qdrant_filter = filter
  601. if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter):
  602. qdrant_filter = RestToGrpc.convert_filter(qdrant_filter)
  603. response = await self.client.async_grpc_points.Search(
  604. grpc.SearchPoints(
  605. collection_name=self.collection_name,
  606. vector_name=self.vector_name,
  607. vector=embedding,
  608. filter=qdrant_filter,
  609. params=search_params,
  610. limit=k,
  611. offset=offset,
  612. with_payload=grpc.WithPayloadSelector(enable=True),
  613. with_vectors=grpc.WithVectorsSelector(enable=False),
  614. score_threshold=score_threshold,
  615. read_consistency=consistency,
  616. **kwargs,
  617. )
  618. )
  619. return [
  620. (
  621. self._document_from_scored_point_grpc(
  622. result, self.content_payload_key, self.metadata_payload_key
  623. ),
  624. result.score,
  625. )
  626. for result in response.result
  627. ]
  628. def max_marginal_relevance_search(
  629. self,
  630. query: str,
  631. k: int = 4,
  632. fetch_k: int = 20,
  633. lambda_mult: float = 0.5,
  634. **kwargs: Any,
  635. ) -> List[Document]:
  636. """Return docs selected using the maximal marginal relevance.
  637. Maximal marginal relevance optimizes for similarity to query AND diversity
  638. among selected documents.
  639. Args:
  640. query: Text to look up documents similar to.
  641. k: Number of Documents to return. Defaults to 4.
  642. fetch_k: Number of Documents to fetch to pass to MMR algorithm.
  643. Defaults to 20.
  644. lambda_mult: Number between 0 and 1 that determines the degree
  645. of diversity among the results with 0 corresponding
  646. to maximum diversity and 1 to minimum diversity.
  647. Defaults to 0.5.
  648. Returns:
  649. List of Documents selected by maximal marginal relevance.
  650. """
  651. query_embedding = self._embed_query(query)
  652. return self.max_marginal_relevance_search_by_vector(
  653. query_embedding, k, fetch_k, lambda_mult, **kwargs
  654. )
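# --- Added usage sketch: MMR search; lambda_mult close to 1 favors pure
# similarity to the query, close to 0 favors diversity among the results. ----
#
#     diverse_docs = qdrant.max_marginal_relevance_search(
#         "query text", k=4, fetch_k=20, lambda_mult=0.5
#     )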
  655. @sync_call_fallback
  656. async def amax_marginal_relevance_search(
  657. self,
  658. query: str,
  659. k: int = 4,
  660. fetch_k: int = 20,
  661. lambda_mult: float = 0.5,
  662. **kwargs: Any,
  663. ) -> List[Document]:
  664. """Return docs selected using the maximal marginal relevance.
  665. Maximal marginal relevance optimizes for similarity to query AND diversity
  666. among selected documents.
  667. Args:
  668. query: Text to look up documents similar to.
  669. k: Number of Documents to return. Defaults to 4.
  670. fetch_k: Number of Documents to fetch to pass to MMR algorithm.
  671. Defaults to 20.
  672. lambda_mult: Number between 0 and 1 that determines the degree
  673. of diversity among the results with 0 corresponding
  674. to maximum diversity and 1 to minimum diversity.
  675. Defaults to 0.5.
  676. Returns:
  677. List of Documents selected by maximal marginal relevance.
  678. """
  679. query_embedding = self._embed_query(query)
  680. return await self.amax_marginal_relevance_search_by_vector(
  681. query_embedding, k, fetch_k, lambda_mult, **kwargs
  682. )
  683. def max_marginal_relevance_search_by_vector(
  684. self,
  685. embedding: List[float],
  686. k: int = 4,
  687. fetch_k: int = 20,
  688. lambda_mult: float = 0.5,
  689. **kwargs: Any,
  690. ) -> List[Document]:
  691. """Return docs selected using the maximal marginal relevance.
  692. Maximal marginal relevance optimizes for similarity to query AND diversity
  693. among selected documents.
  694. Args:
  695. embedding: Embedding to look up documents similar to.
  696. k: Number of Documents to return. Defaults to 4.
697. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20.
  698. lambda_mult: Number between 0 and 1 that determines the degree
  699. of diversity among the results with 0 corresponding
  700. to maximum diversity and 1 to minimum diversity.
  701. Defaults to 0.5.
  702. Returns:
  703. List of Documents selected by maximal marginal relevance.
  704. """
  705. results = self.max_marginal_relevance_search_with_score_by_vector(
  706. embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
  707. )
  708. return list(map(itemgetter(0), results))
  709. @sync_call_fallback
  710. async def amax_marginal_relevance_search_by_vector(
  711. self,
  712. embedding: List[float],
  713. k: int = 4,
  714. fetch_k: int = 20,
  715. lambda_mult: float = 0.5,
  716. **kwargs: Any,
  717. ) -> List[Document]:
  718. """Return docs selected using the maximal marginal relevance.
  719. Maximal marginal relevance optimizes for similarity to query AND diversity
  720. among selected documents.
  721. Args:
722. embedding: Embedding to look up documents similar to.
  723. k: Number of Documents to return. Defaults to 4.
  724. fetch_k: Number of Documents to fetch to pass to MMR algorithm.
  725. Defaults to 20.
  726. lambda_mult: Number between 0 and 1 that determines the degree
  727. of diversity among the results with 0 corresponding
  728. to maximum diversity and 1 to minimum diversity.
  729. Defaults to 0.5.
  730. Returns:
731. List of Documents selected by maximal marginal relevance.
  733. """
  734. results = await self.amax_marginal_relevance_search_with_score_by_vector(
  735. embedding, k, fetch_k, lambda_mult, **kwargs
  736. )
  737. return list(map(itemgetter(0), results))
  738. def max_marginal_relevance_search_with_score_by_vector(
  739. self,
  740. embedding: List[float],
  741. k: int = 4,
  742. fetch_k: int = 20,
  743. lambda_mult: float = 0.5,
  744. **kwargs: Any,
  745. ) -> List[Tuple[Document, float]]:
  746. """Return docs selected using the maximal marginal relevance.
  747. Maximal marginal relevance optimizes for similarity to query AND diversity
  748. among selected documents.
  749. Args:
750. embedding: Embedding to look up documents similar to.
  751. k: Number of Documents to return. Defaults to 4.
  752. fetch_k: Number of Documents to fetch to pass to MMR algorithm.
  753. Defaults to 20.
  754. lambda_mult: Number between 0 and 1 that determines the degree
  755. of diversity among the results with 0 corresponding
  756. to maximum diversity and 1 to minimum diversity.
  757. Defaults to 0.5.
  758. Returns:
  759. List of Documents selected by maximal marginal relevance and distance for
  760. each.
  761. """
  762. query_vector = embedding
  763. if self.vector_name is not None:
  764. query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
  765. results = self.client.search(
  766. collection_name=self.collection_name,
  767. query_vector=query_vector,
  768. with_payload=True,
  769. with_vectors=True,
  770. limit=fetch_k,
  771. )
  772. embeddings = [
  773. result.vector.get(self.vector_name) # type: ignore[index, union-attr]
  774. if self.vector_name is not None
  775. else result.vector
  776. for result in results
  777. ]
  778. mmr_selected = maximal_marginal_relevance(
  779. np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
  780. )
  781. return [
  782. (
  783. self._document_from_scored_point(
  784. results[i], self.content_payload_key, self.metadata_payload_key
  785. ),
  786. results[i].score,
  787. )
  788. for i in mmr_selected
  789. ]
  790. @sync_call_fallback
  791. async def amax_marginal_relevance_search_with_score_by_vector(
  792. self,
  793. embedding: List[float],
  794. k: int = 4,
  795. fetch_k: int = 20,
  796. lambda_mult: float = 0.5,
  797. **kwargs: Any,
  798. ) -> List[Tuple[Document, float]]:
  799. """Return docs selected using the maximal marginal relevance.
  800. Maximal marginal relevance optimizes for similarity to query AND diversity
  801. among selected documents.
  802. Args:
803. embedding: Embedding to look up documents similar to.
  804. k: Number of Documents to return. Defaults to 4.
  805. fetch_k: Number of Documents to fetch to pass to MMR algorithm.
  806. Defaults to 20.
  807. lambda_mult: Number between 0 and 1 that determines the degree
  808. of diversity among the results with 0 corresponding
  809. to maximum diversity and 1 to minimum diversity.
  810. Defaults to 0.5.
  811. Returns:
  812. List of Documents selected by maximal marginal relevance and distance for
  813. each.
  814. """
  815. from qdrant_client import grpc # noqa
  816. from qdrant_client.conversions.conversion import GrpcToRest
  817. response = await self.client.async_grpc_points.Search(
  818. grpc.SearchPoints(
  819. collection_name=self.collection_name,
  820. vector_name=self.vector_name,
  821. vector=embedding,
  822. with_payload=grpc.WithPayloadSelector(enable=True),
  823. with_vectors=grpc.WithVectorsSelector(enable=True),
  824. limit=fetch_k,
  825. )
  826. )
  827. results = [
  828. GrpcToRest.convert_vectors(result.vectors) for result in response.result
  829. ]
  830. embeddings: List[List[float]] = [
  831. result.get(self.vector_name) # type: ignore
  832. if isinstance(result, dict)
  833. else result
  834. for result in results
  835. ]
  836. mmr_selected: List[int] = maximal_marginal_relevance(
  837. np.array(embedding),
  838. embeddings,
  839. k=k,
  840. lambda_mult=lambda_mult,
  841. )
  842. return [
  843. (
  844. self._document_from_scored_point_grpc(
  845. response.result[i],
  846. self.content_payload_key,
  847. self.metadata_payload_key,
  848. ),
  849. response.result[i].score,
  850. )
  851. for i in mmr_selected
  852. ]
  853. def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
  854. """Delete by vector ID or other criteria.
  855. Args:
  856. ids: List of ids to delete.
  857. **kwargs: Other keyword arguments that subclasses might use.
  858. Returns:
  859. Optional[bool]: True if deletion is successful,
  860. False otherwise, None if not implemented.
  861. """
  862. from qdrant_client.http import models as rest
  863. result = self.client.delete(
  864. collection_name=self.collection_name,
  865. points_selector=ids,
  866. )
  867. return result.status == rest.UpdateStatus.COMPLETED
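# --- Added usage sketch: deleting points by the ids returned from
# ``add_texts``; ``added_ids`` is a placeholder. -----------------------------
#
#     ok = qdrant.delete(ids=added_ids)
#     # ok is True when Qdrant reports UpdateStatus.COMPLETED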
  868. @classmethod
  869. def from_texts(
  870. cls: Type[Qdrant],
  871. texts: List[str],
  872. embedding: Embeddings,
  873. metadatas: Optional[List[dict]] = None,
  874. ids: Optional[Sequence[str]] = None,
  875. location: Optional[str] = None,
  876. url: Optional[str] = None,
  877. port: Optional[int] = 6333,
  878. grpc_port: int = 6334,
  879. prefer_grpc: bool = False,
  880. https: Optional[bool] = None,
  881. api_key: Optional[str] = None,
  882. prefix: Optional[str] = None,
  883. timeout: Optional[float] = None,
  884. host: Optional[str] = None,
  885. path: Optional[str] = None,
  886. collection_name: Optional[str] = None,
  887. distance_func: str = "Cosine",
  888. content_payload_key: str = CONTENT_KEY,
  889. metadata_payload_key: str = METADATA_KEY,
  890. vector_name: Optional[str] = VECTOR_NAME,
  891. batch_size: int = 64,
  892. shard_number: Optional[int] = None,
  893. replication_factor: Optional[int] = None,
  894. write_consistency_factor: Optional[int] = None,
  895. on_disk_payload: Optional[bool] = None,
  896. hnsw_config: Optional[common_types.HnswConfigDiff] = None,
  897. optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
  898. wal_config: Optional[common_types.WalConfigDiff] = None,
  899. quantization_config: Optional[common_types.QuantizationConfig] = None,
  900. init_from: Optional[common_types.InitFrom] = None,
  901. force_recreate: bool = False,
  902. **kwargs: Any,
  903. ) -> Qdrant:
  904. """Construct Qdrant wrapper from a list of texts.
  905. Args:
  906. texts: A list of texts to be indexed in Qdrant.
  907. embedding: A subclass of `Embeddings`, responsible for text vectorization.
  908. metadatas:
  909. An optional list of metadata. If provided it has to be of the same
  910. length as a list of texts.
  911. ids:
  912. Optional list of ids to associate with the texts. Ids have to be
  913. uuid-like strings.
  914. location:
  915. If `:memory:` - use in-memory Qdrant instance.
  916. If `str` - use it as a `url` parameter.
  917. If `None` - fallback to relying on `host` and `port` parameters.
  918. url: either host or str of "Optional[scheme], host, Optional[port],
  919. Optional[prefix]". Default: `None`
  920. port: Port of the REST API interface. Default: 6333
  921. grpc_port: Port of the gRPC interface. Default: 6334
  922. prefer_grpc:
923. If true - use gRPC interface whenever possible in custom methods.
  924. Default: False
  925. https: If true - use HTTPS(SSL) protocol. Default: None
  926. api_key: API key for authentication in Qdrant Cloud. Default: None
  927. prefix:
  928. If not None - add prefix to the REST URL path.
  929. Example: service/v1 will result in
  930. http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
  931. Default: None
  932. timeout:
  933. Timeout for REST and gRPC API requests.
  934. Default: 5.0 seconds for REST and unlimited for gRPC
  935. host:
  936. Host name of Qdrant service. If url and host are None, set to
  937. 'localhost'. Default: None
  938. path:
  939. Path in which the vectors will be stored while using local mode.
  940. Default: None
  941. collection_name:
  942. Name of the Qdrant collection to be used. If not provided,
  943. it will be created randomly. Default: None
  944. distance_func:
  945. Distance function. One of: "Cosine" / "Euclid" / "Dot".
  946. Default: "Cosine"
  947. content_payload_key:
  948. A payload key used to store the content of the document.
  949. Default: "page_content"
  950. metadata_payload_key:
  951. A payload key used to store the metadata of the document.
  952. Default: "metadata"
  953. vector_name:
  954. Name of the vector to be used internally in Qdrant.
  955. Default: None
  956. batch_size:
957. How many vectors to upload per request.
  958. Default: 64
  959. shard_number: Number of shards in collection. Default is 1, minimum is 1.
  960. replication_factor:
  961. Replication factor for collection. Default is 1, minimum is 1.
  962. Defines how many copies of each shard will be created.
963. Has effect only in distributed mode.
  964. write_consistency_factor:
  965. Write consistency factor for collection. Default is 1, minimum is 1.
  966. Defines how many replicas should apply the operation for us to consider
  967. it successful. Increasing this number will make the collection more
  968. resilient to inconsistencies, but will also make it fail if not enough
  969. replicas are available.
  970. Does not have any performance impact.
971. Has effect only in distributed mode.
  972. on_disk_payload:
973. If true - point's payload will not be stored in memory.
  974. It will be read from the disk every time it is requested.
  975. This setting saves RAM by (slightly) increasing the response time.
  976. Note: those payload values that are involved in filtering and are
  977. indexed - remain in RAM.
  978. hnsw_config: Params for HNSW index
  979. optimizers_config: Params for optimizer
  980. wal_config: Params for Write-Ahead-Log
  981. quantization_config:
  982. Params for quantization, if None - quantization will be disabled
  983. init_from:
  984. Use data stored in another collection to initialize this collection
  985. force_recreate:
  986. Force recreating the collection
  987. **kwargs:
  988. Additional arguments passed directly into REST client initialization
  989. This is a user-friendly interface that:
  990. 1. Creates embeddings, one for each text
  991. 2. Initializes the Qdrant database as an in-memory docstore by default
  992. (and overridable to a remote docstore)
  993. 3. Adds the text embeddings to the Qdrant database
  994. This is intended to be a quick way to get started.
  995. Example:
  996. .. code-block:: python
  997. from langchain import Qdrant
  998. from langchain.embeddings import OpenAIEmbeddings
  999. embeddings = OpenAIEmbeddings()
  1000. qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
  1001. """
  1002. qdrant = cls._construct_instance(
  1003. texts,
  1004. embedding,
  1005. metadatas,
  1006. ids,
  1007. location,
  1008. url,
  1009. port,
  1010. grpc_port,
  1011. prefer_grpc,
  1012. https,
  1013. api_key,
  1014. prefix,
  1015. timeout,
  1016. host,
  1017. path,
  1018. collection_name,
  1019. distance_func,
  1020. content_payload_key,
  1021. metadata_payload_key,
  1022. vector_name,
  1023. shard_number,
  1024. replication_factor,
  1025. write_consistency_factor,
  1026. on_disk_payload,
  1027. hnsw_config,
  1028. optimizers_config,
  1029. wal_config,
  1030. quantization_config,
  1031. init_from,
  1032. force_recreate,
  1033. **kwargs,
  1034. )
  1035. qdrant.add_texts(texts, metadatas, ids, batch_size)
  1036. return qdrant
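# --- Added usage sketches for ``from_texts``; hosts, API keys and the
# collection name are placeholders, not values from the original module. -----
#
#     # Local, in-memory instance (handy for tests):
#     qdrant = Qdrant.from_texts(
#         texts, embeddings, location=":memory:", collection_name="my_documents"
#     )
#
#     # Remote Qdrant Cloud instance, preferring gRPC:
#     qdrant = Qdrant.from_texts(
#         texts,
#         embeddings,
#         url="https://xyz-example.qdrant.io:6333",
#         prefer_grpc=True,
#         api_key="<QDRANT_API_KEY>",
#         collection_name="my_documents",
#     )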
  1037. @classmethod
  1038. @sync_call_fallback
  1039. async def afrom_texts(
  1040. cls: Type[Qdrant],
  1041. texts: List[str],
  1042. embedding: Embeddings,
  1043. metadatas: Optional[List[dict]] = None,
  1044. ids: Optional[Sequence[str]] = None,
  1045. location: Optional[str] = None,
  1046. url: Optional[str] = None,
  1047. port: Optional[int] = 6333,
  1048. grpc_port: int = 6334,
  1049. prefer_grpc: bool = False,
  1050. https: Optional[bool] = None,
  1051. api_key: Optional[str] = None,
  1052. prefix: Optional[str] = None,
  1053. timeout: Optional[float] = None,
  1054. host: Optional[str] = None,
  1055. path: Optional[str] = None,
  1056. collection_name: Optional[str] = None,
  1057. distance_func: str = "Cosine",
  1058. content_payload_key: str = CONTENT_KEY,
  1059. metadata_payload_key: str = METADATA_KEY,
  1060. vector_name: Optional[str] = VECTOR_NAME,
  1061. batch_size: int = 64,
  1062. shard_number: Optional[int] = None,
  1063. replication_factor: Optional[int] = None,
  1064. write_consistency_factor: Optional[int] = None,
  1065. on_disk_payload: Optional[bool] = None,
  1066. hnsw_config: Optional[common_types.HnswConfigDiff] = None,
  1067. optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
  1068. wal_config: Optional[common_types.WalConfigDiff] = None,
  1069. quantization_config: Optional[common_types.QuantizationConfig] = None,
  1070. init_from: Optional[common_types.InitFrom] = None,
  1071. force_recreate: bool = False,
  1072. **kwargs: Any,
  1073. ) -> Qdrant:
  1074. """Construct Qdrant wrapper from a list of texts.
  1075. Args:
  1076. texts: A list of texts to be indexed in Qdrant.
  1077. embedding: A subclass of `Embeddings`, responsible for text vectorization.
  1078. metadatas:
  1079. An optional list of metadata. If provided it has to be of the same
  1080. length as a list of texts.
  1081. ids:
  1082. Optional list of ids to associate with the texts. Ids have to be
  1083. uuid-like strings.
  1084. location:
  1085. If `:memory:` - use in-memory Qdrant instance.
  1086. If `str` - use it as a `url` parameter.
  1087. If `None` - fallback to relying on `host` and `port` parameters.
  1088. url: either host or str of "Optional[scheme], host, Optional[port],
  1089. Optional[prefix]". Default: `None`
  1090. port: Port of the REST API interface. Default: 6333
  1091. grpc_port: Port of the gRPC interface. Default: 6334
  1092. prefer_grpc:
1093. If true - use gRPC interface whenever possible in custom methods.
  1094. Default: False
  1095. https: If true - use HTTPS(SSL) protocol. Default: None
  1096. api_key: API key for authentication in Qdrant Cloud. Default: None
  1097. prefix:
  1098. If not None - add prefix to the REST URL path.
  1099. Example: service/v1 will result in
  1100. http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
  1101. Default: None
  1102. timeout:
  1103. Timeout for REST and gRPC API requests.
  1104. Default: 5.0 seconds for REST and unlimited for gRPC
  1105. host:
  1106. Host name of Qdrant service. If url and host are None, set to
  1107. 'localhost'. Default: None
  1108. path:
  1109. Path in which the vectors will be stored while using local mode.
  1110. Default: None
  1111. collection_name:
  1112. Name of the Qdrant collection to be used. If not provided,
  1113. it will be created randomly. Default: None
  1114. distance_func:
  1115. Distance function. One of: "Cosine" / "Euclid" / "Dot".
  1116. Default: "Cosine"
  1117. content_payload_key:
  1118. A payload key used to store the content of the document.
  1119. Default: "page_content"
  1120. metadata_payload_key:
  1121. A payload key used to store the metadata of the document.
  1122. Default: "metadata"
  1123. vector_name:
  1124. Name of the vector to be used internally in Qdrant.
  1125. Default: None
  1126. batch_size:
1127. How many vectors to upload per request.
  1128. Default: 64
  1129. shard_number: Number of shards in collection. Default is 1, minimum is 1.
  1130. replication_factor:
  1131. Replication factor for collection. Default is 1, minimum is 1.
  1132. Defines how many copies of each shard will be created.
1133. Has effect only in distributed mode.
  1134. write_consistency_factor:
  1135. Write consistency factor for collection. Default is 1, minimum is 1.
  1136. Defines how many replicas should apply the operation for us to consider
  1137. it successful. Increasing this number will make the collection more
  1138. resilient to inconsistencies, but will also make it fail if not enough
  1139. replicas are available.
  1140. Does not have any performance impact.
1141. Has effect only in distributed mode.
  1142. on_disk_payload:
1143. If true - point's payload will not be stored in memory.
  1144. It will be read from the disk every time it is requested.
  1145. This setting saves RAM by (slightly) increasing the response time.
  1146. Note: those payload values that are involved in filtering and are
  1147. indexed - remain in RAM.
  1148. hnsw_config: Params for HNSW index
  1149. optimizers_config: Params for optimizer
  1150. wal_config: Params for Write-Ahead-Log
  1151. quantization_config:
  1152. Params for quantization, if None - quantization will be disabled
  1153. init_from:
  1154. Use data stored in another collection to initialize this collection
  1155. force_recreate:
  1156. Force recreating the collection
  1157. **kwargs:
  1158. Additional arguments passed directly into REST client initialization
  1159. This is a user-friendly interface that:
  1160. 1. Creates embeddings, one for each text
  1161. 2. Initializes the Qdrant database as an in-memory docstore by default
  1162. (and overridable to a remote docstore)
  1163. 3. Adds the text embeddings to the Qdrant database
  1164. This is intended to be a quick way to get started.
  1165. Example:
  1166. .. code-block:: python
  1167. from langchain import Qdrant
  1168. from langchain.embeddings import OpenAIEmbeddings
  1169. embeddings = OpenAIEmbeddings()
  1170. qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
  1171. """
  1172. qdrant = cls._construct_instance(
  1173. texts,
  1174. embedding,
  1175. metadatas,
  1176. ids,
  1177. location,
  1178. url,
  1179. port,
  1180. grpc_port,
  1181. prefer_grpc,
  1182. https,
  1183. api_key,
  1184. prefix,
  1185. timeout,
  1186. host,
  1187. path,
  1188. collection_name,
  1189. distance_func,
  1190. content_payload_key,
  1191. metadata_payload_key,
  1192. vector_name,
  1193. shard_number,
  1194. replication_factor,
  1195. write_consistency_factor,
  1196. on_disk_payload,
  1197. hnsw_config,
  1198. optimizers_config,
  1199. wal_config,
  1200. quantization_config,
  1201. init_from,
  1202. force_recreate,
  1203. **kwargs,
  1204. )
  1205. await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
  1206. return qdrant
  1207. @classmethod
    def _construct_instance(
        cls: Type[Qdrant],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        location: Optional[str] = None,
        url: Optional[str] = None,
        port: Optional[int] = 6333,
        grpc_port: int = 6334,
        prefer_grpc: bool = False,
        https: Optional[bool] = None,
        api_key: Optional[str] = None,
        prefix: Optional[str] = None,
        timeout: Optional[float] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
        collection_name: Optional[str] = None,
        distance_func: str = "Cosine",
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        vector_name: Optional[str] = VECTOR_NAME,
        shard_number: Optional[int] = None,
        replication_factor: Optional[int] = None,
        write_consistency_factor: Optional[int] = None,
        on_disk_payload: Optional[bool] = None,
        hnsw_config: Optional[common_types.HnswConfigDiff] = None,
        optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
        wal_config: Optional[common_types.WalConfigDiff] = None,
        quantization_config: Optional[common_types.QuantizationConfig] = None,
        init_from: Optional[common_types.InitFrom] = None,
        force_recreate: bool = False,
        **kwargs: Any,
    ) -> Qdrant:
        try:
            import qdrant_client
        except ImportError:
            raise ValueError(
                "Could not import qdrant-client python package. "
                "Please install it with `pip install qdrant-client`."
            )

        from grpc import RpcError
        from qdrant_client.http import models as rest
        from qdrant_client.http.exceptions import UnexpectedResponse

        # Just do a single quick embedding to get the vector size.
        partial_embeddings = embedding.embed_documents(texts[:1])
        vector_size = len(partial_embeddings[0])
        collection_name = collection_name or uuid.uuid4().hex
        distance_func = distance_func.upper()
        client = qdrant_client.QdrantClient(
            location=location,
            url=url,
            port=port,
            grpc_port=grpc_port,
            prefer_grpc=prefer_grpc,
            https=https,
            api_key=api_key,
            prefix=prefix,
            timeout=timeout,
            host=host,
            path=path,
            **kwargs,
        )
        try:
            # Skip any validation in case of forced collection recreate.
            if force_recreate:
                raise ValueError

            # Get the vector configuration of the existing collection and vector, if
            # it was specified. If the old configuration does not match the current
            # one, an exception is raised.
            collection_info = client.get_collection(collection_name=collection_name)
            current_vector_config = collection_info.config.params.vectors
            if isinstance(current_vector_config, dict) and vector_name is not None:
                if vector_name not in current_vector_config:
                    raise QdrantException(
                        f"Existing Qdrant collection {collection_name} does not "
                        f"contain vector named {vector_name}. Did you mean one of the "
                        f"existing vectors: {', '.join(current_vector_config.keys())}? "
                        f"If you want to recreate the collection, set `force_recreate` "
                        f"parameter to `True`."
                    )
                current_vector_config = current_vector_config.get(
                    vector_name
                )  # type: ignore[assignment]
            elif isinstance(current_vector_config, dict) and vector_name is None:
                raise QdrantException(
                    f"Existing Qdrant collection {collection_name} uses named vectors. "
                    f"If you want to reuse it, please set `vector_name` to any of the "
                    f"existing named vectors: "
                    f"{', '.join(current_vector_config.keys())}. "  # noqa
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
            elif (
                not isinstance(current_vector_config, dict) and vector_name is not None
            ):
                raise QdrantException(
                    f"Existing Qdrant collection {collection_name} doesn't use named "
                    f"vectors. If you want to reuse it, please set `vector_name` to "
                    f"`None`. If you want to recreate the collection, set "
                    f"`force_recreate` parameter to `True`."
                )

            # Check if the vector configuration has the same dimensionality.
            if current_vector_config.size != vector_size:  # type: ignore[union-attr]
                raise QdrantException(
                    f"Existing Qdrant collection is configured for vectors with "
                    f"{current_vector_config.size} "  # type: ignore[union-attr]
                    f"dimensions. Selected embeddings are {vector_size}-dimensional. "
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )

            current_distance_func = (
                current_vector_config.distance.name.upper()  # type: ignore[union-attr]
            )
            if current_distance_func != distance_func:
                raise QdrantException(
                    f"Existing Qdrant collection is configured for "
                    f"{current_vector_config.distance} "  # type: ignore[union-attr]
                    f"similarity. Please set `distance_func` parameter to "
                    f"`{distance_func}` if you want to reuse it. If you want to "
                    f"recreate the collection, set `force_recreate` parameter to "
                    f"`True`."
                )
        except (UnexpectedResponse, RpcError, ValueError):
            vectors_config = rest.VectorParams(
                size=vector_size,
                distance=rest.Distance[distance_func],
            )

            # If vector name was provided, we're going to use the named vectors feature
            # with just a single vector.
            if vector_name is not None:
                vectors_config = {  # type: ignore[assignment]
                    vector_name: vectors_config,
                }

            client.recreate_collection(
                collection_name=collection_name,
                vectors_config=vectors_config,
                shard_number=shard_number,
                replication_factor=replication_factor,
                write_consistency_factor=write_consistency_factor,
                on_disk_payload=on_disk_payload,
                hnsw_config=hnsw_config,
                optimizers_config=optimizers_config,
                wal_config=wal_config,
                quantization_config=quantization_config,
                init_from=init_from,
                timeout=timeout,  # type: ignore[arg-type]
            )
        qdrant = cls(
            client=client,
            collection_name=collection_name,
            embeddings=embedding,
            content_payload_key=content_payload_key,
            metadata_payload_key=metadata_payload_key,
            distance_strategy=distance_func,
            vector_name=vector_name,
        )
        return qdrant
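
    # --- Illustrative sketch (editor-added, not part of the upstream module) ---
    # Rough behaviour of the validation above, assuming a hypothetical
    # 384-dimensional embedding model and an existing collection "docs" that was
    # created for 1536-dimensional vectors:
    #
    #     Qdrant._construct_instance(
    #         texts=["hello"],
    #         embedding=my_embeddings,   # hypothetical 384-dim Embeddings object
    #         collection_name="docs",
    #         force_recreate=True,
    #     )
    #
    # Without `force_recreate`, the dimensionality check raises QdrantException
    # ("... configured for vectors with 1536 dimensions. Selected embeddings are
    # 384-dimensional ..."). With `force_recreate=True`, the internal ValueError
    # short-circuits into the except branch and the collection is rebuilt via
    # `client.recreate_collection(...)` with the new vector size.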

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        Select the relevance score function based on the distance strategy.

        The 'correct' relevance function may differ depending on a few things,
        including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.
        """
        if self.distance_strategy == "COSINE":
            return self._cosine_relevance_score_fn
        elif self.distance_strategy == "DOT":
            return self._max_inner_product_relevance_score_fn
        elif self.distance_strategy == "EUCLID":
            return self._euclidean_relevance_score_fn
        else:
            raise ValueError(
                "Unknown distance strategy. `distance_strategy` must be one of "
                "COSINE, DOT or EUCLID."
            )
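
    # Illustrative note (editor-added): `distance_strategy` holds the upper-cased
    # `distance_func` stored by `_construct_instance`, so a store created with
    # e.g. `distance_func="Dot"` dispatches to the max-inner-product scorer here:
    #
    #     store._select_relevance_score_fn()
    #     # -> store._max_inner_product_relevance_score_fn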

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: Input text.
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 and 1
                    used to filter the resulting set of retrieved docs.

        Returns:
            List of Tuples of (doc, similarity_score).
        """
        return self.similarity_search_with_score(query, k, **kwargs)
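
    # Usage sketch (editor-added): the `score_threshold` kwarg mentioned in the
    # docstring is simply forwarded to `similarity_search_with_score`, e.g.
    #
    #     docs_and_scores = store._similarity_search_with_relevance_scores(
    #         "what is qdrant?", k=4, score_threshold=0.8
    #     )
    #
    # where `store` is a hypothetical, already-constructed Qdrant instance.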

    @classmethod
    def _build_payloads(
        cls,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        content_payload_key: str,
        metadata_payload_key: str,
    ) -> List[dict]:
        payloads = []
        for i, text in enumerate(texts):
            if text is None:
                raise ValueError(
                    "At least one of the texts is None. Please remove it before "
                    "calling .from_texts or .add_texts on Qdrant instance."
                )
            metadata = metadatas[i] if metadatas is not None else None
            payloads.append(
                {
                    content_payload_key: text,
                    metadata_payload_key: metadata,
                }
            )
        return payloads
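
    # Example (editor-added): with explicit payload keys, two texts and their
    # metadata are packed into payload dicts like this:
    #
    #     Qdrant._build_payloads(
    #         texts=["foo", "bar"],
    #         metadatas=[{"source": "a.txt"}, {"source": "b.txt"}],
    #         content_payload_key="page_content",
    #         metadata_payload_key="metadata",
    #     )
    #     # -> [{"page_content": "foo", "metadata": {"source": "a.txt"}},
    #     #     {"page_content": "bar", "metadata": {"source": "b.txt"}}]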

    @classmethod
    def _document_from_scored_point(
        cls,
        scored_point: Any,
        content_payload_key: str,
        metadata_payload_key: str,
    ) -> Document:
        return Document(
            page_content=scored_point.payload.get(content_payload_key),
            metadata=scored_point.payload.get(metadata_payload_key) or {},
        )

    @classmethod
    def _document_from_scored_point_grpc(
        cls,
        scored_point: Any,
        content_payload_key: str,
        metadata_payload_key: str,
    ) -> Document:
        from qdrant_client.conversions.conversion import grpc_to_payload

        payload = grpc_to_payload(scored_point.payload)
        return Document(
            page_content=payload[content_payload_key],
            metadata=payload.get(metadata_payload_key) or {},
        )

    def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
        from qdrant_client.http import models as rest

        out = []

        if isinstance(value, dict):
            for _key, value in value.items():
                out.extend(self._build_condition(f"{key}.{_key}", value))
        elif isinstance(value, list):
            for _value in value:
                if isinstance(_value, dict):
                    out.extend(self._build_condition(f"{key}[]", _value))
                else:
                    out.extend(self._build_condition(f"{key}", _value))
        else:
            out.append(
                rest.FieldCondition(
                    key=f"{self.metadata_payload_key}.{key}",
                    match=rest.MatchValue(value=value),
                )
            )

        return out
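
    # Example (editor-added): key flattening performed by `_build_condition`.
    # Nested dicts produce dotted keys and lists of dicts produce "key[]" paths,
    # all prefixed with `self.metadata_payload_key`:
    #
    #     self._build_condition("author", {"name": "Alice"})
    #     # -> [FieldCondition(key="<metadata_payload_key>.author.name",
    #     #                    match=MatchValue(value="Alice"))]
    #
    #     self._build_condition("tags", ["a", "b"])
    #     # -> one MatchValue condition per list element, all on the "tags" key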

    def _qdrant_filter_from_dict(
        self, filter: Optional[DictFilter]
    ) -> Optional[rest.Filter]:
        from qdrant_client.http import models as rest

        if not filter:
            return None

        return rest.Filter(
            must=[
                condition
                for key, value in filter.items()
                for condition in self._build_condition(key, value)
            ]
        )
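
    # Example (editor-added): a metadata filter dict is translated into a Qdrant
    # filter whose `must` clause contains every generated condition, e.g.
    #
    #     self._qdrant_filter_from_dict({"source": "a.txt", "year": 2023})
    #     # -> rest.Filter(must=[
    #     #        FieldCondition(key="<metadata_payload_key>.source",
    #     #                       match=MatchValue(value="a.txt")),
    #     #        FieldCondition(key="<metadata_payload_key>.year",
    #     #                       match=MatchValue(value=2023)),
    #     #    ])
    #
    # An empty or None filter returns None, i.e. no filtering at all.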

    def _embed_query(self, query: str) -> List[float]:
        """Embed query text.

        Used to provide backward compatibility with `embedding_function` argument.

        Args:
            query: Query text.

        Returns:
            List of floats representing the query embedding.
        """
        if self.embeddings is not None:
            embedding = self.embeddings.embed_query(query)
        else:
            if self._embeddings_function is not None:
                embedding = self._embeddings_function(query)
            else:
                raise ValueError(
                    "Neither `embeddings` nor `embedding_function` is set"
                )
        return embedding.tolist() if hasattr(embedding, "tolist") else embedding

    def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
        """Embed search texts.

        Used to provide backward compatibility with `embedding_function` argument.

        Args:
            texts: Iterable of texts to embed.

        Returns:
            List of embeddings, one per input text.
        """
        if self.embeddings is not None:
            embeddings = self.embeddings.embed_documents(list(texts))
            if hasattr(embeddings, "tolist"):
                embeddings = embeddings.tolist()
        elif self._embeddings_function is not None:
            embeddings = []
            for text in texts:
                embedding = self._embeddings_function(text)
                # Convert numpy-like arrays to plain lists before appending.
                if hasattr(embedding, "tolist"):
                    embedding = embedding.tolist()
                embeddings.append(embedding)
        else:
            raise ValueError("Neither `embeddings` nor `embedding_function` is set")
        return embeddings

    def _generate_rest_batches(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
    ) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
        from qdrant_client.http import models as rest

        texts_iterator = iter(texts)
        metadatas_iterator = iter(metadatas or [])
        ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
        while batch_texts := list(islice(texts_iterator, batch_size)):
            # Take the corresponding metadata and id for each text in a batch
            batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
            batch_ids = list(islice(ids_iterator, batch_size))

            # Generate the embeddings for all the texts in a batch
            batch_embeddings = self._embed_texts(batch_texts)

            points = [
                rest.PointStruct(
                    id=point_id,
                    vector=vector
                    if self.vector_name is None
                    else {self.vector_name: vector},
                    payload=payload,
                )
                for point_id, vector, payload in zip(
                    batch_ids,
                    batch_embeddings,
                    self._build_payloads(
                        batch_texts,
                        batch_metadatas,
                        self.content_payload_key,
                        self.metadata_payload_key,
                    ),
                )
            ]

            yield batch_ids, points
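
    # Batching sketch (editor-added): for 100 texts and the default
    # `batch_size=64`, `_generate_rest_batches` yields two batches, first 64 and
    # then 36 points, each as `(batch_ids, [rest.PointStruct, ...])`. When
    # `self.vector_name` is set, every point's vector is wrapped as
    # `{self.vector_name: vector}` so it lands in the named-vector slot created
    # by `_construct_instance`.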