test_llm.py

import os
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.llm.llm import OpenLLMLargeLanguageModel

# These integration tests expect a reachable OpenLLM server; set OPENLLM_SERVER_URL
# in the environment before running them.


def test_validate_credentials_for_chat_model():
    model = OpenLLMLargeLanguageModel()

    # An invalid server URL must fail credential validation
    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='NOT IMPORTANT',
            credentials={
                'server_url': 'invalid_key',
            }
        )

    # A valid server URL must validate without raising
    model.validate_credentials(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        }
    )


def test_invoke_model():
    model = OpenLLMLargeLanguageModel()

    # Blocking invocation (stream=False) returns a single LLMResult
    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        user="abc-123",
        stream=False
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
    assert response.usage.total_tokens > 0


def test_invoke_stream_model():
    model = OpenLLMLargeLanguageModel()

    # Streaming invocation (stream=True) returns a generator of result chunks
    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)

    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # Every chunk except the final one (which carries finish_reason) must have content
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True


def test_get_num_tokens():
    model = OpenLLMLargeLanguageModel()

    # Token counting should return an exact integer count for the prompt
    response = model.get_num_tokens(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        tools=[]
    )

    assert isinstance(response, int)
    assert response == 3