# model_template.py
  1. import json
  2. model_templates = {
  3. # completion default mode
  4. 'completion_default': {
  5. 'app': {
  6. 'mode': 'completion',
  7. 'enable_site': True,
  8. 'enable_api': True,
  9. 'is_demo': False,
  10. 'api_rpm': 0,
  11. 'api_rph': 0,
  12. 'status': 'normal'
  13. },
  14. 'model_config': {
  15. 'provider': 'openai',
  16. 'model_id': 'gpt-3.5-turbo-instruct',
  17. 'configs': {
  18. 'prompt_template': '',
  19. 'prompt_variables': [],
  20. 'completion_params': {
  21. 'max_token': 512,
  22. 'temperature': 1,
  23. 'top_p': 1,
  24. 'presence_penalty': 0,
  25. 'frequency_penalty': 0,
  26. }
  27. },
  28. 'model': json.dumps({
  29. "provider": "openai",
  30. "name": "gpt-3.5-turbo-instruct",
  31. "mode": "completion",
  32. "completion_params": {
  33. "max_tokens": 512,
  34. "temperature": 1,
  35. "top_p": 1,
  36. "presence_penalty": 0,
  37. "frequency_penalty": 0
  38. }
  39. }),
  40. 'user_input_form': json.dumps([
  41. {
  42. "paragraph": {
  43. "label": "Query",
  44. "variable": "query",
  45. "required": True,
  46. "default": ""
  47. }
  48. }
  49. ]),
  50. 'pre_prompt': '{{query}}'
  51. }
  52. },
  53. # chat default mode
  54. 'chat_default': {
  55. 'app': {
  56. 'mode': 'chat',
  57. 'enable_site': True,
  58. 'enable_api': True,
  59. 'is_demo': False,
  60. 'api_rpm': 0,
  61. 'api_rph': 0,
  62. 'status': 'normal'
  63. },
  64. 'model_config': {
  65. 'provider': 'openai',
  66. 'model_id': 'gpt-3.5-turbo',
  67. 'configs': {
  68. 'prompt_template': '',
  69. 'prompt_variables': [],
  70. 'completion_params': {
  71. 'max_token': 512,
  72. 'temperature': 1,
  73. 'top_p': 1,
  74. 'presence_penalty': 0,
  75. 'frequency_penalty': 0,
  76. }
  77. },
  78. 'model': json.dumps({
  79. "provider": "openai",
  80. "name": "gpt-3.5-turbo",
  81. "mode": "chat",
  82. "completion_params": {
  83. "max_tokens": 512,
  84. "temperature": 1,
  85. "top_p": 1,
  86. "presence_penalty": 0,
  87. "frequency_penalty": 0
  88. }
  89. })
  90. }
  91. },
  92. }