# audio_service.py
  1. import io
  2. from werkzeug.datastructures import FileStorage
  3. from core.llm.llm_builder import LLMBuilder
  4. from core.llm.provider.llm_provider_service import LLMProviderService
  5. from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
  6. from core.llm.whisper import Whisper
  7. from models.provider import ProviderName
  8. FILE_SIZE = 15
  9. FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
  10. ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm']
  11. class AudioService:
  12. @classmethod
  13. def transcript(cls, tenant_id: str, file: FileStorage):
  14. if file is None:
  15. raise NoAudioUploadedServiceError()
  16. extension = file.mimetype
  17. if extension not in [f'audio/{ext}' for ext in ALLOWED_EXTENSIONS]:
  18. raise UnsupportedAudioTypeServiceError()
  19. file_content = file.read()
  20. file_size = len(file_content)
  21. if file_size > FILE_SIZE_LIMIT:
  22. message = f"Audio size larger than {FILE_SIZE} mb"
  23. raise AudioTooLargeServiceError(message)
  24. provider_name = LLMBuilder.get_default_provider(tenant_id, 'whisper-1')
  25. if provider_name != ProviderName.OPENAI.value:
  26. raise ProviderNotSupportSpeechToTextServiceError()
  27. provider_service = LLMProviderService(tenant_id, provider_name)
  28. buffer = io.BytesIO(file_content)
  29. buffer.name = 'temp.mp3'
  30. return Whisper(provider_service.provider).transcribe(buffer)