# completion.py
  1. # -*- coding:utf-8 -*-
  2. import json
  3. import logging
  4. from typing import Generator, Union
  5. from flask import Response, stream_with_context
  6. from flask_restful import reqparse
  7. from werkzeug.exceptions import InternalServerError, NotFound
  8. import services
  9. from controllers.web import api
  10. from controllers.web.error import AppUnavailableError, ConversationCompletedError, \
  11. ProviderNotInitializeError, NotChatAppError, NotCompletionAppError, CompletionRequestError, \
  12. ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError
  13. from controllers.web.wraps import WebApiResource
  14. from core.conversation_message_task import PubHandler
  15. from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
  16. LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
  17. from libs.helper import uuid_value
  18. from services.completion_service import CompletionService
# Web API controllers for completion and chat message endpoints (end-user facing).
  20. class CompletionApi(WebApiResource):
  21. def post(self, app_model, end_user):
  22. if app_model.mode != 'completion':
  23. raise NotCompletionAppError()
  24. parser = reqparse.RequestParser()
  25. parser.add_argument('inputs', type=dict, required=True, location='json')
  26. parser.add_argument('query', type=str, location='json')
  27. parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
  28. args = parser.parse_args()
  29. streaming = args['response_mode'] == 'streaming'
  30. try:
  31. response = CompletionService.completion(
  32. app_model=app_model,
  33. user=end_user,
  34. args=args,
  35. from_source='api',
  36. streaming=streaming
  37. )
  38. return compact_response(response)
  39. except services.errors.conversation.ConversationNotExistsError:
  40. raise NotFound("Conversation Not Exists.")
  41. except services.errors.conversation.ConversationCompletedError:
  42. raise ConversationCompletedError()
  43. except services.errors.app_model_config.AppModelConfigBrokenError:
  44. logging.exception("App model config broken.")
  45. raise AppUnavailableError()
  46. except ProviderTokenNotInitError:
  47. raise ProviderNotInitializeError()
  48. except QuotaExceededError:
  49. raise ProviderQuotaExceededError()
  50. except ModelCurrentlyNotSupportError:
  51. raise ProviderModelCurrentlyNotSupportError()
  52. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  53. LLMRateLimitError, LLMAuthorizationError) as e:
  54. raise CompletionRequestError(str(e))
  55. except ValueError as e:
  56. raise e
  57. except Exception as e:
  58. logging.exception("internal server error.")
  59. raise InternalServerError()
  60. class CompletionStopApi(WebApiResource):
  61. def post(self, app_model, end_user, task_id):
  62. if app_model.mode != 'completion':
  63. raise NotCompletionAppError()
  64. PubHandler.stop(end_user, task_id)
  65. return {'result': 'success'}, 200
  66. class ChatApi(WebApiResource):
  67. def post(self, app_model, end_user):
  68. if app_model.mode != 'chat':
  69. raise NotChatAppError()
  70. parser = reqparse.RequestParser()
  71. parser.add_argument('inputs', type=dict, required=True, location='json')
  72. parser.add_argument('query', type=str, required=True, location='json')
  73. parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
  74. parser.add_argument('conversation_id', type=uuid_value, location='json')
  75. args = parser.parse_args()
  76. streaming = args['response_mode'] == 'streaming'
  77. try:
  78. response = CompletionService.completion(
  79. app_model=app_model,
  80. user=end_user,
  81. args=args,
  82. from_source='api',
  83. streaming=streaming
  84. )
  85. return compact_response(response)
  86. except services.errors.conversation.ConversationNotExistsError:
  87. raise NotFound("Conversation Not Exists.")
  88. except services.errors.conversation.ConversationCompletedError:
  89. raise ConversationCompletedError()
  90. except services.errors.app_model_config.AppModelConfigBrokenError:
  91. logging.exception("App model config broken.")
  92. raise AppUnavailableError()
  93. except ProviderTokenNotInitError:
  94. raise ProviderNotInitializeError()
  95. except QuotaExceededError:
  96. raise ProviderQuotaExceededError()
  97. except ModelCurrentlyNotSupportError:
  98. raise ProviderModelCurrentlyNotSupportError()
  99. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  100. LLMRateLimitError, LLMAuthorizationError) as e:
  101. raise CompletionRequestError(str(e))
  102. except ValueError as e:
  103. raise e
  104. except Exception as e:
  105. logging.exception("internal server error.")
  106. raise InternalServerError()
  107. class ChatStopApi(WebApiResource):
  108. def post(self, app_model, end_user, task_id):
  109. if app_model.mode != 'chat':
  110. raise NotChatAppError()
  111. PubHandler.stop(end_user, task_id)
  112. return {'result': 'success'}, 200
  113. def compact_response(response: Union[dict | Generator]) -> Response:
  114. if isinstance(response, dict):
  115. return Response(response=json.dumps(response), status=200, mimetype='application/json')
  116. else:
  117. def generate() -> Generator:
  118. try:
  119. for chunk in response:
  120. yield chunk
  121. except services.errors.conversation.ConversationNotExistsError:
  122. yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
  123. except services.errors.conversation.ConversationCompletedError:
  124. yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
  125. except services.errors.app_model_config.AppModelConfigBrokenError:
  126. logging.exception("App model config broken.")
  127. yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
  128. except ProviderTokenNotInitError:
  129. yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
  130. except QuotaExceededError:
  131. yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
  132. except ModelCurrentlyNotSupportError:
  133. yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
  134. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  135. LLMRateLimitError, LLMAuthorizationError) as e:
  136. yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
  137. except ValueError as e:
  138. yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
  139. except Exception:
  140. logging.exception("internal server error.")
  141. yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"
  142. return Response(stream_with_context(generate()), status=200,
  143. mimetype='text/event-stream')
# Route registrations: completion/chat message creation and task-stop endpoints.
api.add_resource(CompletionApi, '/completion-messages')
api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
api.add_resource(ChatApi, '/chat-messages')
api.add_resource(ChatStopApi, '/chat-messages/<string:task_id>/stop')