streamable_open_ai.py

from typing import List, Optional

from langchain import OpenAI
from langchain.schema import LLMResult

from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async


class StreamableOpenAI(OpenAI):
    """LangChain OpenAI LLM whose generate calls go through shared error handling."""

    @handle_llm_exceptions
    def generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        # Delegate to the base implementation; the decorator applies the
        # project's shared LLM exception handling around the call.
        return super().generate(prompts, stop)

    @handle_llm_exceptions_async
    async def agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        # Async counterpart, wrapped by the async-aware decorator.
        return await super().agenerate(prompts, stop)
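The decorators come from the project-internal core.llm.error_handle_wraps module, whose source is not shown here. As a rough illustration of the pattern, a minimal sketch of what such a sync/async decorator pair could look like follows; the names match the imports above, but the bodies are assumptions for illustration, not the project's actual implementation.

# Hypothetical sketch of the error-handling decorator pair. The real
# implementations live in core.llm.error_handle_wraps; the logic below
# (log, then re-raise) is an assumption based on the decorator names.
import functools
import logging


def handle_llm_exceptions(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logging.exception("LLM call failed")
            raise

    return wrapper


def handle_llm_exceptions_async(func):
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception:
            logging.exception("async LLM call failed")
            raise

    return wrapper

A brief usage sketch, assuming a valid OPENAI_API_KEY in the environment; the constructor arguments are the standard LangChain OpenAI ones, which StreamableOpenAI inherits unchanged.

# StreamableOpenAI behaves exactly like LangChain's OpenAI LLM, with the
# error-handling decorators wrapped around generate/agenerate.
llm = StreamableOpenAI(temperature=0)
result = llm.generate(["Say hello."])
print(result.generations[0][0].text)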