logging_callback.py

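"""Console logging callback for LLM invocations.

Prints the prompt messages, model parameters, streamed chunks, the final
result, and any error raised during a model invocation.
"""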

import json
import logging
import sys
from typing import Optional

from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
from core.model_runtime.model_providers.__base.ai_model import AIModel

logger = logging.getLogger(__name__)


class LoggingCallback(Callback):
    def on_before_invoke(self, llm_instance: AIModel, model: str, credentials: dict,
                         prompt_messages: list[PromptMessage], model_parameters: dict,
                         tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                         stream: bool = True, user: Optional[str] = None) -> None:
        """
        Before invoke callback

        :param llm_instance: LLM instance
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_before_invoke]\n", color='blue')
        self.print_text(f"Model: {model}\n", color='blue')
        self.print_text("Parameters:\n", color='blue')
        for key, value in model_parameters.items():
            self.print_text(f"\t{key}: {value}\n", color='blue')

        if stop:
            self.print_text(f"\tstop: {stop}\n", color='blue')

        if tools:
            self.print_text("\tTools:\n", color='blue')
            for tool in tools:
                self.print_text(f"\t\t{tool.name}\n", color='blue')

        self.print_text(f"Stream: {stream}\n", color='blue')

        if user:
            self.print_text(f"User: {user}\n", color='blue')

        self.print_text("Prompt messages:\n", color='blue')
        for prompt_message in prompt_messages:
            if prompt_message.name:
                self.print_text(f"\tname: {prompt_message.name}\n", color='blue')
            self.print_text(f"\trole: {prompt_message.role.value}\n", color='blue')
            self.print_text(f"\tcontent: {prompt_message.content}\n", color='blue')

        if stream:
            self.print_text("\n[on_llm_new_chunk]")

    def on_new_chunk(self, llm_instance: AIModel, chunk: LLMResultChunk, model: str, credentials: dict,
                     prompt_messages: list[PromptMessage], model_parameters: dict,
                     tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                     stream: bool = True, user: Optional[str] = None) -> None:
        """
        On new chunk callback

        :param llm_instance: LLM instance
        :param chunk: chunk
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        # Write each streamed delta straight to stdout so tokens appear as they arrive.
        sys.stdout.write(chunk.delta.message.content)
        sys.stdout.flush()

    def on_after_invoke(self, llm_instance: AIModel, result: LLMResult, model: str, credentials: dict,
                        prompt_messages: list[PromptMessage], model_parameters: dict,
                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                        stream: bool = True, user: Optional[str] = None) -> None:
        """
        After invoke callback

        :param llm_instance: LLM instance
        :param result: result
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_after_invoke]\n", color='yellow')
        self.print_text(f"Content: {result.message.content}\n", color='yellow')

        if result.message.tool_calls:
            self.print_text("Tool calls:\n", color='yellow')
            for tool_call in result.message.tool_calls:
                self.print_text(f"\t{tool_call.id}\n", color='yellow')
                self.print_text(f"\t{tool_call.function.name}\n", color='yellow')
                self.print_text(f"\t{json.dumps(tool_call.function.arguments)}\n", color='yellow')

        self.print_text(f"Model: {result.model}\n", color='yellow')
        self.print_text(f"Usage: {result.usage}\n", color='yellow')
        self.print_text(f"System Fingerprint: {result.system_fingerprint}\n", color='yellow')

    def on_invoke_error(self, llm_instance: AIModel, ex: Exception, model: str, credentials: dict,
                        prompt_messages: list[PromptMessage], model_parameters: dict,
                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                        stream: bool = True, user: Optional[str] = None) -> None:
        """
        Invoke error callback

        :param llm_instance: LLM instance
        :param ex: exception
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_invoke_error]\n", color='red')
        logger.exception(ex)
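

# --- Usage sketch (illustrative, not part of the original module) ---
# Assumption: in Dify's model runtime, `LargeLanguageModel.invoke()` accepts a
# `callbacks` list and fires the hooks above around the call. The `llm`,
# `model`, and `credentials` values below are hypothetical placeholders for a
# configured model instance and its credentials.
def _demo(llm, model: str, credentials: dict) -> None:
    from core.model_runtime.entities.message_entities import UserPromptMessage

    result = llm.invoke(
        model=model,
        credentials=credentials,
        prompt_messages=[UserPromptMessage(content="Hello!")],
        model_parameters={"temperature": 0.7},
        stream=False,
        callbacks=[LoggingCallback()],  # logs before/after invoke and any errors
    )
    print(result.message.content)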