# audio_service.py
  1. import io
  2. import logging
  3. from typing import Optional
  4. from werkzeug.datastructures import FileStorage
  5. from core.model_manager import ModelManager
  6. from core.model_runtime.entities.model_entities import ModelType
  7. from models.model import App, AppMode, AppModelConfig, Message
  8. from services.errors.audio import (
  9. AudioTooLargeServiceError,
  10. NoAudioUploadedServiceError,
  11. ProviderNotSupportSpeechToTextServiceError,
  12. ProviderNotSupportTextToSpeechServiceError,
  13. UnsupportedAudioTypeServiceError,
  14. )
  15. FILE_SIZE = 30
  16. FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
  17. ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm', 'amr']
  18. logger = logging.getLogger(__name__)
  19. class AudioService:
  20. @classmethod
  21. def transcript_asr(cls, app_model: App, file: FileStorage, end_user: Optional[str] = None):
  22. if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
  23. workflow = app_model.workflow
  24. if workflow is None:
  25. raise ValueError("Speech to text is not enabled")
  26. features_dict = workflow.features_dict
  27. if 'speech_to_text' not in features_dict or not features_dict['speech_to_text'].get('enabled'):
  28. raise ValueError("Speech to text is not enabled")
  29. else:
  30. app_model_config: AppModelConfig = app_model.app_model_config
  31. if not app_model_config.speech_to_text_dict['enabled']:
  32. raise ValueError("Speech to text is not enabled")
  33. if file is None:
  34. raise NoAudioUploadedServiceError()
  35. extension = file.mimetype
  36. if extension not in [f'audio/{ext}' for ext in ALLOWED_EXTENSIONS]:
  37. raise UnsupportedAudioTypeServiceError()
  38. file_content = file.read()
  39. file_size = len(file_content)
  40. if file_size > FILE_SIZE_LIMIT:
  41. message = f"Audio size larger than {FILE_SIZE} mb"
  42. raise AudioTooLargeServiceError(message)
  43. model_manager = ModelManager()
  44. model_instance = model_manager.get_default_model_instance(
  45. tenant_id=app_model.tenant_id,
  46. model_type=ModelType.SPEECH2TEXT
  47. )
  48. if model_instance is None:
  49. raise ProviderNotSupportSpeechToTextServiceError()
  50. buffer = io.BytesIO(file_content)
  51. buffer.name = 'temp.mp3'
  52. return {"text": model_instance.invoke_speech2text(file=buffer, user=end_user)}
  53. @classmethod
  54. def transcript_tts(cls, app_model: App, text: Optional[str] = None,
  55. voice: Optional[str] = None, end_user: Optional[str] = None, message_id: Optional[str] = None):
  56. from collections.abc import Generator
  57. from flask import Response, stream_with_context
  58. from app import app
  59. from extensions.ext_database import db
  60. def invoke_tts(text_content: str, app_model, voice: Optional[str] = None):
  61. with app.app_context():
  62. if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
  63. workflow = app_model.workflow
  64. if workflow is None:
  65. raise ValueError("TTS is not enabled")
  66. features_dict = workflow.features_dict
  67. if 'text_to_speech' not in features_dict or not features_dict['text_to_speech'].get('enabled'):
  68. raise ValueError("TTS is not enabled")
  69. voice = features_dict['text_to_speech'].get('voice') if voice is None else voice
  70. else:
  71. text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
  72. if not text_to_speech_dict.get('enabled'):
  73. raise ValueError("TTS is not enabled")
  74. voice = text_to_speech_dict.get('voice') if voice is None else voice
  75. model_manager = ModelManager()
  76. model_instance = model_manager.get_default_model_instance(
  77. tenant_id=app_model.tenant_id,
  78. model_type=ModelType.TTS
  79. )
  80. try:
  81. if not voice:
  82. voices = model_instance.get_tts_voices()
  83. if voices:
  84. voice = voices[0].get('value')
  85. else:
  86. raise ValueError("Sorry, no voice available.")
  87. return model_instance.invoke_tts(
  88. content_text=text_content.strip(),
  89. user=end_user,
  90. tenant_id=app_model.tenant_id,
  91. voice=voice
  92. )
  93. except Exception as e:
  94. raise e
  95. if message_id:
  96. message = db.session.query(Message).filter(
  97. Message.id == message_id
  98. ).first()
  99. if message.answer == '' and message.status == 'normal':
  100. return None
  101. else:
  102. response = invoke_tts(message.answer, app_model=app_model, voice=voice)
  103. if isinstance(response, Generator):
  104. return Response(stream_with_context(response), content_type='audio/mpeg')
  105. return response
  106. else:
  107. response = invoke_tts(text, app_model, voice)
  108. if isinstance(response, Generator):
  109. return Response(stream_with_context(response), content_type='audio/mpeg')
  110. return response
  111. @classmethod
  112. def transcript_tts_voices(cls, tenant_id: str, language: str):
  113. model_manager = ModelManager()
  114. model_instance = model_manager.get_default_model_instance(
  115. tenant_id=tenant_id,
  116. model_type=ModelType.TTS
  117. )
  118. if model_instance is None:
  119. raise ProviderNotSupportTextToSpeechServiceError()
  120. try:
  121. return model_instance.get_tts_voices(language)
  122. except Exception as e:
  123. raise e