xinference.py

import os
import re
from typing import List, Union

import pytest
from _pytest.monkeypatch import MonkeyPatch
from requests import Response
from requests.exceptions import ConnectionError
from requests.sessions import Session
from xinference_client.client.restful.restful_client import (
    Client,
    RESTfulChatglmCppChatModelHandle,
    RESTfulChatModelHandle,
    RESTfulEmbeddingModelHandle,
    RESTfulGenerateModelHandle,
    RESTfulRerankModelHandle,
)
from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage


class MockXinferenceClass(object):
    """Stand-in implementations of xinference_client methods, monkeypatched in by the fixture below."""

    def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
        if not re.match(r'https?:\/\/[^\s\/$.?#].[^\s]*$', self.base_url):
            raise RuntimeError('404 Not Found')

        if 'generate' == model_uid:
            return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if 'chat' == model_uid:
            return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if 'embedding' == model_uid:
            return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if 'rerank' == model_uid:
            return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        raise RuntimeError('404 Not Found')

    def get(self: Session, url: str, **kwargs):
        response = Response()
        if 'v1/models/' in url:
            # get model uid
            model_uid = url.split('/')[-1] or ''
            if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', model_uid) and \
                    model_uid not in ['generate', 'chat', 'embedding', 'rerank']:
                response.status_code = 404
                response._content = b'{}'
                return response

            # check if url is valid
            if not re.match(r'^(https?):\/\/[^\s\/$.?#].[^\s]*$', url):
                response.status_code = 404
                response._content = b'{}'
                return response

            if model_uid in ['generate', 'chat']:
                response.status_code = 200
                response._content = b'''{
    "model_type": "LLM",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "chatglm3-6b",
    "model_lang": [
        "en"
    ],
    "model_ability": [
        "generate",
        "chat"
    ],
    "model_description": "latest chatglm3",
    "model_format": "pytorch",
    "model_size_in_billions": 7,
    "quantization": "none",
    "model_hub": "huggingface",
    "revision": null,
    "context_length": 2048,
    "replica": 1
}'''
                return response
            elif model_uid == 'embedding':
                response.status_code = 200
                response._content = b'''{
    "model_type": "embedding",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "bge",
    "model_lang": [
        "en"
    ],
    "revision": null,
    "max_tokens": 512
}'''
                return response
        elif 'v1/cluster/auth' in url:
            response.status_code = 200
            response._content = b'''{
    "auth": true
}'''
            return response

    def _check_cluster_authenticated(self):
        self._cluster_authed = True

    def rerank(self: RESTfulRerankModelHandle, documents: List[str], query: str, top_n: int) -> dict:
        # check if self._model_uid is a valid uuid
        if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
                self._model_uid != 'rerank':
            raise RuntimeError('404 Not Found')

        if not re.match(r'^(https?):\/\/[^\s\/$.?#].[^\s]*$', self._base_url):
            raise RuntimeError('404 Not Found')

        if top_n is None:
            top_n = 1

        return {
            'results': [
                {
                    'index': i,
                    'document': doc,
                    'relevance_score': 0.9
                }
                for i, doc in enumerate(documents[:top_n])
            ]
        }

    def create_embedding(
        self: RESTfulGenerateModelHandle,
        input: Union[str, List[str]],
        **kwargs
    ) -> dict:
        # check if self._model_uid is a valid uuid
        if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
                self._model_uid != 'embedding':
            raise RuntimeError('404 Not Found')

        if isinstance(input, str):
            input = [input]
        ipt_len = len(input)

        # return one fixed 768-dimensional dummy vector per input string
        embedding = Embedding(
            object="list",
            model=self._model_uid,
            data=[
                EmbeddingData(
                    index=i,
                    object="embedding",
                    embedding=[1919.810 for _ in range(768)]
                )
                for i in range(ipt_len)
            ],
            usage=EmbeddingUsage(
                prompt_tokens=ipt_len,
                total_tokens=ipt_len
            )
        )

        return embedding


MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'


@pytest.fixture
def setup_xinference_mock(request, monkeypatch: MonkeyPatch):
    if MOCK:
        monkeypatch.setattr(Client, 'get_model', MockXinferenceClass.get_chat_model)
        monkeypatch.setattr(Client, '_check_cluster_authenticated', MockXinferenceClass._check_cluster_authenticated)
        monkeypatch.setattr(Session, 'get', MockXinferenceClass.get)
        monkeypatch.setattr(RESTfulEmbeddingModelHandle, 'create_embedding', MockXinferenceClass.create_embedding)
        monkeypatch.setattr(RESTfulRerankModelHandle, 'rerank', MockXinferenceClass.rerank)

    yield

    if MOCK:
        monkeypatch.undo()
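
# Illustrative usage sketch (not part of the original module): a test can request the
# fixture so the patches above are active while it runs; the port, model uid and
# assertion below are assumptions chosen for demonstration only.
#
# def test_embedding_is_mocked(setup_xinference_mock):
#     client = Client('http://127.0.0.1:9997')
#     handle = client.get_model('embedding')
#     result = handle.create_embedding('hello world')
#     assert len(result['data']) == 1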