| File | Last commit | Commit message | Age |
|---|---|---|---|
| __init__.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| anthropic_provider.py | 2eba98a465 | feat: optimize anthropic connection pool (#1066) | 2 years ago |
| azure_openai_provider.py | 1bd0a76a20 | feat: optimize error raise (#820) | 2 years ago |
| base.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| chatglm_provider.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| hosted.py | 9adbeadeec | feat: claude paid optimize (#890) | 2 years ago |
| huggingface_hub_provider.py | 9b247fccd4 | feat: adjust hf max tokens (#979) | 2 years ago |
| localai_provider.py | 417c19577a | feat: add LocalAI local embedding model support (#1021) | 2 years ago |
| minimax_provider.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| openai_provider.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| openllm_provider.py | 6c832ee328 | fix: remove openllm pypi package because of this package too large (#931) | 2 years ago |
| replicate_provider.py | 95b179fb39 | fix: replicate text generation model validate (#923) | 2 years ago |
| spark_provider.py | f42e7d1a61 | feat: add spark v2 support (#885) | 2 years ago |
| tongyi_provider.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| wenxin_provider.py | 5fa2161b05 | feat: server multi models support (#799) | 2 years ago |
| xinference_provider.py | 9ae91a2ec3 | feat: optimize xinference request max token key and stop reason (#998) | 2 years ago |
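Judging by the layout, base.py appears to hold a shared provider interface that each *_provider.py module implements for one model backend (OpenAI, Anthropic, Xinference, and so on). The sketch below is a minimal illustration of that base-class pattern only; the class and method names (BaseModelProvider, validate_credentials, provider_name) are assumptions for illustration and are not the actual APIs defined in these files.

```python
# Hypothetical sketch of the base-class pattern suggested by this directory
# layout (base.py plus one module per backend). All names here are
# illustrative assumptions, not the real classes in these files.
from abc import ABC, abstractmethod


class BaseModelProvider(ABC):
    """Interface a concrete provider module would implement (assumed)."""

    def __init__(self, credentials: dict):
        self.credentials = credentials

    @abstractmethod
    def provider_name(self) -> str:
        """Return the provider identifier, e.g. 'openai' or 'anthropic'."""

    @abstractmethod
    def validate_credentials(self) -> None:
        """Raise if the stored credentials are unusable."""


class OpenAIProviderSketch(BaseModelProvider):
    """Illustrative concrete provider; not the real openai_provider.py."""

    def provider_name(self) -> str:
        return "openai"

    def validate_credentials(self) -> None:
        if not self.credentials.get("api_key"):
            raise ValueError("Missing api_key for OpenAI provider")


if __name__ == "__main__":
    provider = OpenAIProviderSketch({"api_key": "sk-..."})
    provider.validate_credentials()
    print(provider.provider_name())
```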