# defaults.py
  1. from core.model_runtime.entities.model_entities import DefaultParameterName
  2. PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
  3. DefaultParameterName.TEMPERATURE: {
  4. 'label': {
  5. 'en_US': 'Temperature',
  6. 'zh_Hans': '温度',
  7. },
  8. 'type': 'float',
  9. 'help': {
  10. 'en_US': 'Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.',
  11. 'zh_Hans': '温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。',
  12. },
  13. 'required': False,
  14. 'default': 0.0,
  15. 'min': 0.0,
  16. 'max': 1.0,
  17. 'precision': 2,
  18. },
  19. DefaultParameterName.TOP_P: {
  20. 'label': {
  21. 'en_US': 'Top P',
  22. 'zh_Hans': 'Top P',
  23. },
  24. 'type': 'float',
  25. 'help': {
  26. 'en_US': 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.',
  27. 'zh_Hans': '通过核心采样控制多样性:0.5表示考虑了一半的所有可能性加权选项。',
  28. },
  29. 'required': False,
  30. 'default': 1.0,
  31. 'min': 0.0,
  32. 'max': 1.0,
  33. 'precision': 2,
  34. },
  35. DefaultParameterName.PRESENCE_PENALTY: {
  36. 'label': {
  37. 'en_US': 'Presence Penalty',
  38. 'zh_Hans': '存在惩罚',
  39. },
  40. 'type': 'float',
  41. 'help': {
  42. 'en_US': 'Applies a penalty to the log-probability of tokens already in the text.',
  43. 'zh_Hans': '对文本中已有的标记的对数概率施加惩罚。',
  44. },
  45. 'required': False,
  46. 'default': 0.0,
  47. 'min': 0.0,
  48. 'max': 1.0,
  49. 'precision': 2,
  50. },
  51. DefaultParameterName.FREQUENCY_PENALTY: {
  52. 'label': {
  53. 'en_US': 'Frequency Penalty',
  54. 'zh_Hans': '频率惩罚',
  55. },
  56. 'type': 'float',
  57. 'help': {
  58. 'en_US': 'Applies a penalty to the log-probability of tokens that appear in the text.',
  59. 'zh_Hans': '对文本中出现的标记的对数概率施加惩罚。',
  60. },
  61. 'required': False,
  62. 'default': 0.0,
  63. 'min': 0.0,
  64. 'max': 1.0,
  65. 'precision': 2,
  66. },
  67. DefaultParameterName.MAX_TOKENS: {
  68. 'label': {
  69. 'en_US': 'Max Tokens',
  70. 'zh_Hans': '最大标记',
  71. },
  72. 'type': 'int',
  73. 'help': {
  74. 'en_US': 'Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.',
  75. 'zh_Hans': '指定生成结果长度的上限。如果生成结果截断,可以调大该参数。',
  76. },
  77. 'required': False,
  78. 'default': 64,
  79. 'min': 1,
  80. 'max': 2048,
  81. 'precision': 0,
  82. },
  83. DefaultParameterName.RESPONSE_FORMAT: {
  84. 'label': {
  85. 'en_US': 'Response Format',
  86. 'zh_Hans': '回复格式',
  87. },
  88. 'type': 'string',
  89. 'help': {
  90. 'en_US': 'Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.',
  91. 'zh_Hans': '设置一个返回格式,确保llm的输出尽可能是有效的代码块,如JSON、XML等',
  92. },
  93. 'required': False,
  94. 'options': ['JSON', 'XML'],
  95. },
  96. DefaultParameterName.JSON_SCHEMA: {
  97. 'label': {
  98. 'en_US': 'JSON Schema',
  99. },
  100. 'type': 'text',
  101. 'help': {
  102. 'en_US': 'Set a response json schema will ensure LLM to adhere it.',
  103. 'zh_Hans': '设置返回的json schema,llm将按照它返回',
  104. },
  105. 'required': False,
  106. },
  107. }