test_llm.py

import os
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.anthropic.llm.llm import AnthropicLargeLanguageModel
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock


@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_validate_credentials(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    # An invalid API key must be rejected during credential validation.
    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(model="claude-instant-1.2", credentials={"anthropic_api_key": "invalid_key"})

    # A valid key taken from the environment should validate without raising.
    model.validate_credentials(
        model="claude-instant-1.2", credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")}
    )


@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_invoke_model(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    # Non-streaming invocation should return a single LLMResult with non-empty content.
    response = model.invoke(
        model="claude-instant-1.2",
        credentials={
            "anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY"),
            "anthropic_api_url": os.environ.get("ANTHROPIC_API_URL"),
        },
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={"temperature": 0.0, "top_p": 1.0, "max_tokens": 10},
        stop=["How"],
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0


@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True)
def test_invoke_stream_model(setup_anthropic_mock):
    model = AnthropicLargeLanguageModel()

    # Streaming invocation should return a generator of LLMResultChunk objects.
    response = model.invoke(
        model="claude-instant-1.2",
        credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={"temperature": 0.0, "max_tokens": 100},
        stream=True,
        user="abc-123",
    )

    assert isinstance(response, Generator)

    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # Each chunk must carry content except the final one, which sets finish_reason and may be empty.
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True


def test_get_num_tokens():
    model = AnthropicLargeLanguageModel()

    # Token counting does not call the API, so no Anthropic mock fixture is used here.
    num_tokens = model.get_num_tokens(
        model="claude-instant-1.2",
        credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
    )

    assert num_tokens == 18