prompt_transform.py

from typing import Optional

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.message_entities import PromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.prompt.entities.advanced_prompt_entities import MemoryConfig


class PromptTransform:
    def _append_chat_histories(self, memory: TokenBufferMemory,
                               memory_config: MemoryConfig,
                               prompt_messages: list[PromptMessage],
                               model_config: ModelConfigWithCredentialsEntity) -> list[PromptMessage]:
        # Fill the remaining token budget with chat history pulled from memory.
        rest_tokens = self._calculate_rest_token(prompt_messages, model_config)
        histories = self._get_history_messages_list_from_memory(memory, memory_config, rest_tokens)
        prompt_messages.extend(histories)

        return prompt_messages

    def _calculate_rest_token(self, prompt_messages: list[PromptMessage],
                              model_config: ModelConfigWithCredentialsEntity) -> int:
        # Fallback budget when the model does not declare a context size.
        rest_tokens = 2000

        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
        if model_context_tokens:
            model_instance = ModelInstance(
                provider_model_bundle=model_config.provider_model_bundle,
                model=model_config.model
            )

            curr_message_tokens = model_instance.get_llm_num_tokens(
                prompt_messages
            )

            # Also reserve the completion budget configured via max_tokens.
            max_tokens = 0
            for parameter_rule in model_config.model_schema.parameter_rules:
                if (parameter_rule.name == 'max_tokens'
                        or (parameter_rule.use_template and parameter_rule.use_template == 'max_tokens')):
                    max_tokens = (model_config.parameters.get(parameter_rule.name)
                                  or model_config.parameters.get(parameter_rule.use_template)) or 0

            rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
            rest_tokens = max(rest_tokens, 0)

        return rest_tokens

    def _get_history_messages_from_memory(self, memory: TokenBufferMemory,
                                          memory_config: MemoryConfig,
                                          max_token_limit: int,
                                          human_prefix: Optional[str] = None,
                                          ai_prefix: Optional[str] = None) -> str:
        """Get memory messages as a single history prompt text."""
        kwargs = {
            "max_token_limit": max_token_limit
        }

        if human_prefix:
            kwargs['human_prefix'] = human_prefix

        if ai_prefix:
            kwargs['ai_prefix'] = ai_prefix

        if memory_config.window.enabled and memory_config.window.size is not None and memory_config.window.size > 0:
            kwargs['message_limit'] = memory_config.window.size

        return memory.get_history_prompt_text(
            **kwargs
        )

    def _get_history_messages_list_from_memory(self, memory: TokenBufferMemory,
                                               memory_config: MemoryConfig,
                                               max_token_limit: int) -> list[PromptMessage]:
        """Get memory messages as a list of prompt messages."""
        return memory.get_history_prompt_messages(
            max_token_limit=max_token_limit,
            message_limit=memory_config.window.size
            if (memory_config.window.enabled
                and memory_config.window.size is not None
                and memory_config.window.size > 0)
            else None
        )
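
The budgeting in _calculate_rest_token reduces to: remaining history budget = context size, minus the reserved completion budget (max_tokens), minus the tokens already used by the current prompt, floored at zero, with a fixed 2000-token fallback when no context size is declared. A minimal standalone sketch of that arithmetic follows; the function and parameter names are illustrative only and are not part of this module or of any Dify API.

# Illustrative sketch of the budget arithmetic used by _calculate_rest_token.
# Names below (remaining_history_budget, default_budget, ...) are hypothetical.
def remaining_history_budget(context_size: Optional[int],
                             current_prompt_tokens: int,
                             max_completion_tokens: int = 0,
                             default_budget: int = 2000) -> int:
    """Tokens left for chat history after reserving prompt and completion."""
    if not context_size:
        # No declared context window: fall back to a fixed default budget.
        return default_budget
    return max(context_size - max_completion_tokens - current_prompt_tokens, 0)


# e.g. an 8192-token context with a 512-token completion reserve and a
# 1200-token current prompt leaves 6480 tokens for history.
assert remaining_history_budget(8192, 1200, 512) == 6480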