test_llm.py

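"""Integration tests for the Anthropic large language model provider in the
model runtime: credential validation, blocking and streaming invocation, and
token counting."""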
import os
from typing import Generator

import pytest

from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage, AssistantPromptMessage
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.anthropic.llm.llm import AnthropicLargeLanguageModel
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock
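
# setup_anthropic_mock (parametrized indirectly with ['none']) is expected to patch the
# Anthropic client so these tests can run without a live API; see
# tests/integration_tests/model_runtime/__mock/anthropic.py for the actual behavior.

# Credential validation: an invalid key must raise CredentialsValidateFailedError,
# while the key from ANTHROPIC_API_KEY must validate successfully.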
@pytest.mark.parametrize('setup_anthropic_mock', [['none']], indirect=True)
def test_validate_credentials(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='claude-instant-1',
            credentials={
                'anthropic_api_key': 'invalid_key'
            }
        )

    model.validate_credentials(
        model='claude-instant-1',
        credentials={
            'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
        }
    )
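
# Blocking invocation: with stream=False, invoke() should return a complete LLMResult
# whose assistant message has non-empty content.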
@pytest.mark.parametrize('setup_anthropic_mock', [['none']], indirect=True)
def test_invoke_model(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    response = model.invoke(
        model='claude-instant-1',
        credentials={
            'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY'),
            'anthropic_api_url': os.environ.get('ANTHROPIC_API_URL')
        },
        prompt_messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.0,
            'top_p': 1.0,
            'max_tokens_to_sample': 10
        },
        stop=['How'],
        stream=False,
        user="abc-123"
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
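
# Streaming invocation: with stream=True, invoke() should return a generator of
# LLMResultChunk objects; each delta carries an AssistantPromptMessage, and only the
# final chunk (the one with finish_reason set) may have empty content.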
@pytest.mark.parametrize('setup_anthropic_mock', [['none']], indirect=True)
def test_invoke_stream_model(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    response = model.invoke(
        model='claude-instant-1',
        credentials={
            'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
        },
        prompt_messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.0,
            'max_tokens_to_sample': 100
        },
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)

    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
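
# Token counting: get_num_tokens() should report 18 tokens for this
# system + user prompt against claude-instant-1.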
def test_get_num_tokens():
    model = AnthropicLargeLanguageModel()

    num_tokens = model.get_num_tokens(
        model='claude-instant-1',
        credentials={
            'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
        },
        prompt_messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ]
    )

    assert num_tokens == 18