# test_anthropic_model.py
  1. import json
  2. import os
  3. from unittest.mock import patch
  4. from langchain.schema import ChatGeneration, AIMessage
  5. from core.model_providers.models.entity.message import PromptMessage, MessageType
  6. from core.model_providers.models.entity.model_params import ModelKwargs
  7. from core.model_providers.models.llm.anthropic_model import AnthropicModel
  8. from core.model_providers.providers.anthropic_provider import AnthropicProvider
  9. from models.provider import Provider, ProviderType
  10. def get_mock_provider(valid_api_key):
  11. return Provider(
  12. id='provider_id',
  13. tenant_id='tenant_id',
  14. provider_name='anthropic',
  15. provider_type=ProviderType.CUSTOM.value,
  16. encrypted_config=json.dumps({'anthropic_api_key': valid_api_key}),
  17. is_valid=True,
  18. )
  19. def get_mock_model(model_name):
  20. model_kwargs = ModelKwargs(
  21. max_tokens=10,
  22. temperature=0
  23. )
  24. valid_api_key = os.environ['ANTHROPIC_API_KEY']
  25. model_provider = AnthropicProvider(provider=get_mock_provider(valid_api_key))
  26. return AnthropicModel(
  27. model_provider=model_provider,
  28. name=model_name,
  29. model_kwargs=model_kwargs
  30. )
  31. def decrypt_side_effect(tenant_id, encrypted_api_key):
  32. return encrypted_api_key
  33. @patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
  34. def test_get_num_tokens(mock_decrypt):
  35. model = get_mock_model('claude-2')
  36. rst = model.get_num_tokens([
  37. PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
  38. ])
  39. assert rst == 6
  40. @patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
  41. def test_run(mock_decrypt, mocker):
  42. mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
  43. model = get_mock_model('claude-2')
  44. messages = [PromptMessage(content='Human: 1 + 1=? \nAssistant: ')]
  45. rst = model.run(
  46. messages,
  47. stop=['\nHuman:'],
  48. )
  49. assert len(rst.content) > 0