Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-12-26 17:55:58 +08:00)
feat: merge main
@@ -14,10 +14,14 @@ var ModelList = []string{
 	"qwen2-72b-instruct", "qwen2-57b-a14b-instruct", "qwen2-7b-instruct", "qwen2-1.5b-instruct", "qwen2-0.5b-instruct",
 	"qwen1.5-110b-chat", "qwen1.5-72b-chat", "qwen1.5-32b-chat", "qwen1.5-14b-chat", "qwen1.5-7b-chat", "qwen1.5-1.8b-chat", "qwen1.5-0.5b-chat",
 	"qwen-72b-chat", "qwen-14b-chat", "qwen-7b-chat", "qwen-1.8b-chat", "qwen-1.8b-longcontext-chat",
+	"qvq-72b-preview",
+	"qwen2.5-vl-72b-instruct", "qwen2.5-vl-7b-instruct", "qwen2.5-vl-2b-instruct", "qwen2.5-vl-1b-instruct", "qwen2.5-vl-0.5b-instruct",
 	"qwen2-vl-7b-instruct", "qwen2-vl-2b-instruct", "qwen-vl-v1", "qwen-vl-chat-v1",
 	"qwen2-audio-instruct", "qwen-audio-chat",
 	"qwen2.5-math-72b-instruct", "qwen2.5-math-7b-instruct", "qwen2.5-math-1.5b-instruct", "qwen2-math-72b-instruct", "qwen2-math-7b-instruct", "qwen2-math-1.5b-instruct",
 	"qwen2.5-coder-32b-instruct", "qwen2.5-coder-14b-instruct", "qwen2.5-coder-7b-instruct", "qwen2.5-coder-3b-instruct", "qwen2.5-coder-1.5b-instruct", "qwen2.5-coder-0.5b-instruct",
 	"text-embedding-v1", "text-embedding-v3", "text-embedding-v2", "text-embedding-async-v2", "text-embedding-async-v1",
 	"ali-stable-diffusion-xl", "ali-stable-diffusion-v1.5", "wanx-v1",
+	"qwen-mt-plus", "qwen-mt-turbo",
+	"deepseek-r1", "deepseek-v3", "deepseek-r1-distill-qwen-1.5b", "deepseek-r1-distill-qwen-7b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b", "deepseek-r1-distill-llama-8b", "deepseek-r1-distill-llama-70b",
 }
20  relay/adaptor/alibailian/constants.go  Normal file
@@ -0,0 +1,20 @@
+package alibailian
+
+// https://help.aliyun.com/zh/model-studio/getting-started/models
+
+var ModelList = []string{
+	"qwen-turbo",
+	"qwen-plus",
+	"qwen-long",
+	"qwen-max",
+	"qwen-coder-plus",
+	"qwen-coder-plus-latest",
+	"qwen-coder-turbo",
+	"qwen-coder-turbo-latest",
+	"qwen-mt-plus",
+	"qwen-mt-turbo",
+	"qwq-32b-preview",
+
+	"deepseek-r1",
+	"deepseek-v3",
+}
19  relay/adaptor/alibailian/main.go  Normal file
@@ -0,0 +1,19 @@
+package alibailian
+
+import (
+	"fmt"
+
+	"github.com/songquanpeng/one-api/relay/meta"
+	"github.com/songquanpeng/one-api/relay/relaymode"
+)
+
+func GetRequestURL(meta *meta.Meta) (string, error) {
+	switch meta.Mode {
+	case relaymode.ChatCompletions:
+		return fmt.Sprintf("%s/compatible-mode/v1/chat/completions", meta.BaseURL), nil
+	case relaymode.Embeddings:
+		return fmt.Sprintf("%s/compatible-mode/v1/embeddings", meta.BaseURL), nil
+	default:
+	}
+	return "", fmt.Errorf("unsupported relay mode %d for ali bailian", meta.Mode)
+}
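For context, a minimal usage sketch of the new Bailian helper; the base URL value is illustrative (in practice it comes from the channel's configured BaseURL):

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

func main() {
	m := &meta.Meta{Mode: relaymode.ChatCompletions, BaseURL: "https://dashscope.aliyuncs.com"}
	u, err := alibailian.GetRequestURL(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions
}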
@@ -4,6 +4,7 @@ var ModelList = []string{
 	"claude-instant-1.2", "claude-2.0", "claude-2.1",
 	"claude-3-haiku-20240307",
 	"claude-3-5-haiku-20241022",
+	"claude-3-5-haiku-latest",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
 	"claude-3-5-sonnet-20240620",
30  relay/adaptor/baiduv2/constants.go  Normal file
@@ -0,0 +1,30 @@
+package baiduv2
+
+// https://console.bce.baidu.com/support/?_=1692863460488&timestamp=1739074632076#/api?product=QIANFAN&project=%E5%8D%83%E5%B8%86ModelBuilder&parent=%E5%AF%B9%E8%AF%9DChat%20V2&api=v2%2Fchat%2Fcompletions&method=post
+// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Fm2vrveyu#%E6%94%AF%E6%8C%81%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8
+
+var ModelList = []string{
+	"ernie-4.0-8k-latest",
+	"ernie-4.0-8k-preview",
+	"ernie-4.0-8k",
+	"ernie-4.0-turbo-8k-latest",
+	"ernie-4.0-turbo-8k-preview",
+	"ernie-4.0-turbo-8k",
+	"ernie-4.0-turbo-128k",
+	"ernie-3.5-8k-preview",
+	"ernie-3.5-8k",
+	"ernie-3.5-128k",
+	"ernie-speed-8k",
+	"ernie-speed-128k",
+	"ernie-speed-pro-128k",
+	"ernie-lite-8k",
+	"ernie-lite-pro-128k",
+	"ernie-tiny-8k",
+	"ernie-char-8k",
+	"ernie-char-fiction-8k",
+	"ernie-novel-8k",
+	"deepseek-v3",
+	"deepseek-r1",
+	"deepseek-r1-distill-qwen-32b",
+	"deepseek-r1-distill-qwen-14b",
+}
17  relay/adaptor/baiduv2/main.go  Normal file
@@ -0,0 +1,17 @@
+package baiduv2
+
+import (
+	"fmt"
+
+	"github.com/songquanpeng/one-api/relay/meta"
+	"github.com/songquanpeng/one-api/relay/relaymode"
+)
+
+func GetRequestURL(meta *meta.Meta) (string, error) {
+	switch meta.Mode {
+	case relaymode.ChatCompletions:
+		return fmt.Sprintf("%s/v2/chat/completions", meta.BaseURL), nil
+	default:
+	}
+	return "", fmt.Errorf("unsupported relay mode %d for baidu v2", meta.Mode)
+}
@@ -2,5 +2,5 @@ package deepseek
 
 var ModelList = []string{
 	"deepseek-chat",
-	"deepseek-coder",
+	"deepseek-reasoner",
 }
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common/config"
@@ -20,12 +21,12 @@ type Adaptor struct {
 }
 
 func (a *Adaptor) Init(meta *meta.Meta) {
 
 }
 
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	defaultVersion := config.GeminiVersion
-	if meta.ActualModelName == "gemini-2.0-flash-exp" {
+	if strings.Contains(meta.ActualModelName, "gemini-2.0") ||
+		strings.Contains(meta.ActualModelName, "gemini-1.5") {
 		defaultVersion = "v1beta"
 	}
@@ -1,11 +1,35 @@
 package gemini
 
+import (
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
+)
+
 // https://ai.google.dev/models/gemini
 
-var ModelList = []string{
-	"gemini-pro", "gemini-1.0-pro",
-	"gemini-1.5-flash", "gemini-1.5-pro",
-	"text-embedding-004", "aqa",
-	"gemini-2.0-flash-exp",
-	"gemini-2.0-flash-thinking-exp",
+var ModelList = geminiv2.ModelList
 
+// ModelsSupportSystemInstruction is the list of models that support system instruction.
+//
+// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions
+var ModelsSupportSystemInstruction = []string{
+	// "gemini-1.0-pro-002",
+	// "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
+	// "gemini-1.5-flash-8b",
+	// "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
+	// "gemini-1.5-pro-experimental",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-thinking-exp-01-21",
+}
+
+// IsModelSupportSystemInstruction reports whether the model supports system instruction.
+//
+// Because the module still targets Go 1.20, slices.Contains cannot be used.
+func IsModelSupportSystemInstruction(model string) bool {
+	for _, m := range ModelsSupportSystemInstruction {
+		if m == model {
+			return true
+		}
+	}
+
+	return false
+}
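If the module later moves to Go 1.21+, the linear scan could collapse to the standard library (a sketch, not part of this commit; add "slices" to the imports):

func IsModelSupportSystemInstruction(model string) bool {
	return slices.Contains(ModelsSupportSystemInstruction, model)
}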
@@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 		}
 		// Converting system prompt to prompt from user for the same reason
 		if content.Role == "system" {
-			content.Role = "user"
-			shouldAddDummyModelMessage = true
+			if IsModelSupportSystemInstruction(textRequest.Model) {
+				geminiRequest.SystemInstruction = &content
+				geminiRequest.SystemInstruction.Role = ""
+				continue
+			} else {
+				content.Role = "user"
+			}
 		}
 
 		geminiRequest.Contents = append(geminiRequest.Contents, content)
 
 		// If a system message is the last message, we need to add a dummy model message to make gemini happy
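Net effect: for models on the support list, an OpenAI-style system message is promoted to Gemini's native field instead of being demoted to a user turn. Abbreviated sketch of the resulting request body (top-level field names follow the struct tags in this package's model.go):

{
  "system_instruction": {"parts": [{"text": "Be terse."}]},
  "contents": [{"role": "user", "parts": [{"text": "Hi"}]}]
}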
@@ -1,10 +1,11 @@
 package gemini
 
 type ChatRequest struct {
-	Contents         []ChatContent        `json:"contents"`
-	SafetySettings   []ChatSafetySettings `json:"safety_settings,omitempty"`
-	GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
-	Tools            []ChatTools          `json:"tools,omitempty"`
+	Contents          []ChatContent        `json:"contents"`
+	SafetySettings    []ChatSafetySettings `json:"safety_settings,omitempty"`
+	GenerationConfig  ChatGenerationConfig `json:"generation_config,omitempty"`
+	Tools             []ChatTools          `json:"tools,omitempty"`
+	SystemInstruction *ChatContent         `json:"system_instruction,omitempty"`
 }
 
 type EmbeddingRequest struct {
15  relay/adaptor/geminiv2/constants.go  Normal file
@@ -0,0 +1,15 @@
+package geminiv2
+
+// https://ai.google.dev/models/gemini
+
+var ModelList = []string{
+	"gemini-pro", "gemini-1.0-pro",
+	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
+	"gemini-1.5-flash", "gemini-1.5-flash-8b",
+	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
+	"text-embedding-004", "aqa",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-lite-preview-02-05",
+	"gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-2.0-pro-exp-02-05",
+}
14  relay/adaptor/geminiv2/main.go  Normal file
@@ -0,0 +1,14 @@
+package geminiv2
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/songquanpeng/one-api/relay/meta"
+)
+
+func GetRequestURL(meta *meta.Meta) (string, error) {
+	baseURL := strings.TrimSuffix(meta.BaseURL, "/")
+	requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1")
+	return fmt.Sprintf("%s%s", baseURL, requestPath), nil
+}
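A quick sketch of the resulting rewrite; the BaseURL value is illustrative:

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/meta"
)

func main() {
	m := &meta.Meta{
		BaseURL:        "https://generativelanguage.googleapis.com/v1beta/openai",
		RequestURLPath: "/v1/chat/completions",
	}
	u, _ := geminiv2.GetRequestURL(m)
	fmt.Println(u) // https://generativelanguage.googleapis.com/v1beta/openai/chat/completions
}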
@@ -3,7 +3,6 @@ package groq
 // https://console.groq.com/docs/models
 
 var ModelList = []string{
-	"gemma-7b-it",
 	"gemma2-9b-it",
 	"llama-3.1-70b-versatile",
 	"llama-3.1-8b-instant",
@@ -11,7 +10,6 @@ var ModelList = []string{
-	"llama-3.2-11b-vision-preview",
 	"llama-3.2-1b-preview",
 	"llama-3.2-3b-preview",
 	"llama-3.2-11b-vision-preview",
 	"llama-3.2-90b-text-preview",
 	"llama-3.2-90b-vision-preview",
 	"llama-guard-3-8b",
@@ -24,4 +22,6 @@ var ModelList = []string{
 	"distil-whisper-large-v3-en",
 	"whisper-large-v3",
 	"whisper-large-v3-turbo",
+	"deepseek-r1-distill-llama-70b-specdec",
+	"deepseek-r1-distill-llama-70b",
 }
@@ -8,4 +8,6 @@ var ModelList = []string{
 	"abab6-chat",
 	"abab5.5-chat",
 	"abab5.5s-chat",
+	"MiniMax-VL-01",
+	"MiniMax-Text-01",
 }
@@ -8,8 +8,12 @@ import (
 	"strings"
 
 	"github.com/gin-gonic/gin"
 
 	"github.com/songquanpeng/one-api/relay/adaptor"
+	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
+	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
 	"github.com/songquanpeng/one-api/relay/channeltype"
@@ -52,6 +56,12 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 		return doubao.GetRequestURL(meta)
 	case channeltype.Novita:
 		return novita.GetRequestURL(meta)
+	case channeltype.BaiduV2:
+		return baiduv2.GetRequestURL(meta)
+	case channeltype.AliBailian:
+		return alibailian.GetRequestURL(meta)
+	case channeltype.GeminiOpenAICompatible:
+		return geminiv2.GetRequestURL(meta)
 	default:
 		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
 	}
@@ -2,19 +2,24 @@ package openai
 
 import (
 	"github.com/songquanpeng/one-api/relay/adaptor/ai360"
+	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
 	"github.com/songquanpeng/one-api/relay/adaptor/baichuan"
+	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/deepseek"
 	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/groq"
 	"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
 	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
+	"github.com/songquanpeng/one-api/relay/adaptor/openrouter"
 	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
 	"github.com/songquanpeng/one-api/relay/adaptor/xai"
+	"github.com/songquanpeng/one-api/relay/adaptor/xunfeiv2"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )
@@ -34,6 +39,8 @@ var CompatibleChannels = []int{
 	channeltype.Novita,
 	channeltype.SiliconFlow,
 	channeltype.XAI,
+	channeltype.BaiduV2,
+	channeltype.XunfeiV2,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -68,6 +75,16 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "siliconflow", siliconflow.ModelList
 	case channeltype.XAI:
 		return "xai", xai.ModelList
+	case channeltype.BaiduV2:
+		return "baiduv2", baiduv2.ModelList
+	case channeltype.XunfeiV2:
+		return "xunfeiv2", xunfeiv2.ModelList
+	case channeltype.OpenRouter:
+		return "openrouter", openrouter.ModelList
+	case channeltype.AliBailian:
+		return "alibailian", alibailian.ModelList
+	case channeltype.GeminiOpenAICompatible:
+		return "geminiv2", geminiv2.ModelList
 	default:
 		return "openai", ModelList
 	}
@@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int)
 }
 
 func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
+	if channelType == channeltype.OpenAICompatible {
+		return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1"))
+	}
 	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
 
 	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
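The new channeltype.OpenAICompatible branch normalizes both sides of the join. A self-contained sketch (hypothetical base URL):

package main

import (
	"fmt"
	"strings"
)

func main() {
	base := "https://my-gateway.example.com/openai/" // hypothetical channel base URL
	path := "/v1/chat/completions"
	// Trailing "/" on the base and the leading "/v1" on the path are both stripped.
	fmt.Println(strings.TrimSuffix(base, "/") + strings.TrimPrefix(path, "/v1"))
	// Output: https://my-gateway.example.com/openai/chat/completions
}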
@@ -3,14 +3,16 @@ package openai
 import (
 	"errors"
 	"fmt"
+	"math"
+	"strings"
 
 	"github.com/pkoukk/tiktoken-go"
 
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
 	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
 	"github.com/songquanpeng/one-api/relay/model"
-	"math"
-	"strings"
 )
 
 // tokenEncoderMap won't grow after initialization
@@ -21,7 +23,8 @@ func InitTokenEncoders() {
 	logger.SysLog("initializing token encoders")
 	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
 	if err != nil {
-		logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
+		logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s, "+
+			"if you are running in an offline environment, set TIKTOKEN_CACHE_DIR to use existing files; see https://stackoverflow.com/questions/76106366/how-to-use-tiktoken-in-offline-mode-computer for details", err.Error()))
 	}
 	defaultTokenEncoder = gpt35TokenEncoder
 	gpt4oTokenEncoder, err := tiktoken.EncodingForModel("gpt-4o")
235  relay/adaptor/openrouter/constants.go  Normal file
@@ -0,0 +1,235 @@
+package openrouter
+
+var ModelList = []string{
+	"01-ai/yi-large",
+	"aetherwiing/mn-starcannon-12b",
+	"ai21/jamba-1-5-large",
+	"ai21/jamba-1-5-mini",
+	"ai21/jamba-instruct",
+	"aion-labs/aion-1.0",
+	"aion-labs/aion-1.0-mini",
+	"aion-labs/aion-rp-llama-3.1-8b",
+	"allenai/llama-3.1-tulu-3-405b",
+	"alpindale/goliath-120b",
+	"alpindale/magnum-72b",
+	"amazon/nova-lite-v1",
+	"amazon/nova-micro-v1",
+	"amazon/nova-pro-v1",
+	"anthracite-org/magnum-v2-72b",
+	"anthracite-org/magnum-v4-72b",
+	"anthropic/claude-2",
+	"anthropic/claude-2.0",
+	"anthropic/claude-2.0:beta",
+	"anthropic/claude-2.1",
+	"anthropic/claude-2.1:beta",
+	"anthropic/claude-2:beta",
+	"anthropic/claude-3-haiku",
+	"anthropic/claude-3-haiku:beta",
+	"anthropic/claude-3-opus",
+	"anthropic/claude-3-opus:beta",
+	"anthropic/claude-3-sonnet",
+	"anthropic/claude-3-sonnet:beta",
+	"anthropic/claude-3.5-haiku",
+	"anthropic/claude-3.5-haiku-20241022",
+	"anthropic/claude-3.5-haiku-20241022:beta",
+	"anthropic/claude-3.5-haiku:beta",
+	"anthropic/claude-3.5-sonnet",
+	"anthropic/claude-3.5-sonnet-20240620",
+	"anthropic/claude-3.5-sonnet-20240620:beta",
+	"anthropic/claude-3.5-sonnet:beta",
+	"cognitivecomputations/dolphin-mixtral-8x22b",
+	"cognitivecomputations/dolphin-mixtral-8x7b",
+	"cohere/command",
+	"cohere/command-r",
+	"cohere/command-r-03-2024",
+	"cohere/command-r-08-2024",
+	"cohere/command-r-plus",
+	"cohere/command-r-plus-04-2024",
+	"cohere/command-r-plus-08-2024",
+	"cohere/command-r7b-12-2024",
+	"databricks/dbrx-instruct",
+	"deepseek/deepseek-chat",
+	"deepseek/deepseek-chat-v2.5",
+	"deepseek/deepseek-chat:free",
+	"deepseek/deepseek-r1",
+	"deepseek/deepseek-r1-distill-llama-70b",
+	"deepseek/deepseek-r1-distill-llama-70b:free",
+	"deepseek/deepseek-r1-distill-llama-8b",
+	"deepseek/deepseek-r1-distill-qwen-1.5b",
+	"deepseek/deepseek-r1-distill-qwen-14b",
+	"deepseek/deepseek-r1-distill-qwen-32b",
+	"deepseek/deepseek-r1:free",
+	"eva-unit-01/eva-llama-3.33-70b",
+	"eva-unit-01/eva-qwen-2.5-32b",
+	"eva-unit-01/eva-qwen-2.5-72b",
+	"google/gemini-2.0-flash-001",
+	"google/gemini-2.0-flash-exp:free",
+	"google/gemini-2.0-flash-lite-preview-02-05:free",
+	"google/gemini-2.0-flash-thinking-exp-1219:free",
+	"google/gemini-2.0-flash-thinking-exp:free",
+	"google/gemini-2.0-pro-exp-02-05:free",
+	"google/gemini-exp-1206:free",
+	"google/gemini-flash-1.5",
+	"google/gemini-flash-1.5-8b",
+	"google/gemini-flash-1.5-8b-exp",
+	"google/gemini-pro",
+	"google/gemini-pro-1.5",
+	"google/gemini-pro-vision",
+	"google/gemma-2-27b-it",
+	"google/gemma-2-9b-it",
+	"google/gemma-2-9b-it:free",
+	"google/gemma-7b-it",
+	"google/learnlm-1.5-pro-experimental:free",
+	"google/palm-2-chat-bison",
+	"google/palm-2-chat-bison-32k",
+	"google/palm-2-codechat-bison",
+	"google/palm-2-codechat-bison-32k",
+	"gryphe/mythomax-l2-13b",
+	"gryphe/mythomax-l2-13b:free",
+	"huggingfaceh4/zephyr-7b-beta:free",
+	"infermatic/mn-inferor-12b",
+	"inflection/inflection-3-pi",
+	"inflection/inflection-3-productivity",
+	"jondurbin/airoboros-l2-70b",
+	"liquid/lfm-3b",
+	"liquid/lfm-40b",
+	"liquid/lfm-7b",
+	"mancer/weaver",
+	"meta-llama/llama-2-13b-chat",
+	"meta-llama/llama-2-70b-chat",
+	"meta-llama/llama-3-70b-instruct",
+	"meta-llama/llama-3-8b-instruct",
+	"meta-llama/llama-3-8b-instruct:free",
+	"meta-llama/llama-3.1-405b",
+	"meta-llama/llama-3.1-405b-instruct",
+	"meta-llama/llama-3.1-70b-instruct",
+	"meta-llama/llama-3.1-8b-instruct",
+	"meta-llama/llama-3.2-11b-vision-instruct",
+	"meta-llama/llama-3.2-11b-vision-instruct:free",
+	"meta-llama/llama-3.2-1b-instruct",
+	"meta-llama/llama-3.2-3b-instruct",
+	"meta-llama/llama-3.2-90b-vision-instruct",
+	"meta-llama/llama-3.3-70b-instruct",
+	"meta-llama/llama-3.3-70b-instruct:free",
+	"meta-llama/llama-guard-2-8b",
+	"microsoft/phi-3-medium-128k-instruct",
+	"microsoft/phi-3-medium-128k-instruct:free",
+	"microsoft/phi-3-mini-128k-instruct",
+	"microsoft/phi-3-mini-128k-instruct:free",
+	"microsoft/phi-3.5-mini-128k-instruct",
+	"microsoft/phi-4",
+	"microsoft/wizardlm-2-7b",
+	"microsoft/wizardlm-2-8x22b",
+	"minimax/minimax-01",
+	"mistralai/codestral-2501",
+	"mistralai/codestral-mamba",
+	"mistralai/ministral-3b",
+	"mistralai/ministral-8b",
+	"mistralai/mistral-7b-instruct",
+	"mistralai/mistral-7b-instruct-v0.1",
+	"mistralai/mistral-7b-instruct-v0.3",
+	"mistralai/mistral-7b-instruct:free",
+	"mistralai/mistral-large",
+	"mistralai/mistral-large-2407",
+	"mistralai/mistral-large-2411",
+	"mistralai/mistral-medium",
+	"mistralai/mistral-nemo",
+	"mistralai/mistral-nemo:free",
+	"mistralai/mistral-small",
+	"mistralai/mistral-small-24b-instruct-2501",
+	"mistralai/mistral-small-24b-instruct-2501:free",
+	"mistralai/mistral-tiny",
+	"mistralai/mixtral-8x22b-instruct",
+	"mistralai/mixtral-8x7b",
+	"mistralai/mixtral-8x7b-instruct",
+	"mistralai/pixtral-12b",
+	"mistralai/pixtral-large-2411",
+	"neversleep/llama-3-lumimaid-70b",
+	"neversleep/llama-3-lumimaid-8b",
+	"neversleep/llama-3-lumimaid-8b:extended",
+	"neversleep/llama-3.1-lumimaid-70b",
+	"neversleep/llama-3.1-lumimaid-8b",
+	"neversleep/noromaid-20b",
+	"nothingiisreal/mn-celeste-12b",
+	"nousresearch/hermes-2-pro-llama-3-8b",
+	"nousresearch/hermes-3-llama-3.1-405b",
+	"nousresearch/hermes-3-llama-3.1-70b",
+	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
+	"nousresearch/nous-hermes-llama2-13b",
+	"nvidia/llama-3.1-nemotron-70b-instruct",
+	"nvidia/llama-3.1-nemotron-70b-instruct:free",
+	"openai/chatgpt-4o-latest",
+	"openai/gpt-3.5-turbo",
+	"openai/gpt-3.5-turbo-0125",
+	"openai/gpt-3.5-turbo-0613",
+	"openai/gpt-3.5-turbo-1106",
+	"openai/gpt-3.5-turbo-16k",
+	"openai/gpt-3.5-turbo-instruct",
+	"openai/gpt-4",
+	"openai/gpt-4-0314",
+	"openai/gpt-4-1106-preview",
+	"openai/gpt-4-32k",
+	"openai/gpt-4-32k-0314",
+	"openai/gpt-4-turbo",
+	"openai/gpt-4-turbo-preview",
+	"openai/gpt-4o",
+	"openai/gpt-4o-2024-05-13",
+	"openai/gpt-4o-2024-08-06",
+	"openai/gpt-4o-2024-11-20",
+	"openai/gpt-4o-mini",
+	"openai/gpt-4o-mini-2024-07-18",
+	"openai/gpt-4o:extended",
+	"openai/o1",
+	"openai/o1-mini",
+	"openai/o1-mini-2024-09-12",
+	"openai/o1-preview",
+	"openai/o1-preview-2024-09-12",
+	"openai/o3-mini",
+	"openai/o3-mini-high",
+	"openchat/openchat-7b",
+	"openchat/openchat-7b:free",
+	"openrouter/auto",
+	"perplexity/llama-3.1-sonar-huge-128k-online",
+	"perplexity/llama-3.1-sonar-large-128k-chat",
+	"perplexity/llama-3.1-sonar-large-128k-online",
+	"perplexity/llama-3.1-sonar-small-128k-chat",
+	"perplexity/llama-3.1-sonar-small-128k-online",
+	"perplexity/sonar",
+	"perplexity/sonar-reasoning",
+	"pygmalionai/mythalion-13b",
+	"qwen/qvq-72b-preview",
+	"qwen/qwen-2-72b-instruct",
+	"qwen/qwen-2-7b-instruct",
+	"qwen/qwen-2-7b-instruct:free",
+	"qwen/qwen-2-vl-72b-instruct",
+	"qwen/qwen-2-vl-7b-instruct",
+	"qwen/qwen-2.5-72b-instruct",
+	"qwen/qwen-2.5-7b-instruct",
+	"qwen/qwen-2.5-coder-32b-instruct",
+	"qwen/qwen-max",
+	"qwen/qwen-plus",
+	"qwen/qwen-turbo",
+	"qwen/qwen-vl-plus:free",
+	"qwen/qwen2.5-vl-72b-instruct:free",
+	"qwen/qwq-32b-preview",
+	"raifle/sorcererlm-8x22b",
+	"sao10k/fimbulvetr-11b-v2",
+	"sao10k/l3-euryale-70b",
+	"sao10k/l3-lunaris-8b",
+	"sao10k/l3.1-70b-hanami-x1",
+	"sao10k/l3.1-euryale-70b",
+	"sao10k/l3.3-euryale-70b",
+	"sophosympatheia/midnight-rose-70b",
+	"sophosympatheia/rogue-rose-103b-v0.2:free",
+	"teknium/openhermes-2.5-mistral-7b",
+	"thedrummer/rocinante-12b",
+	"thedrummer/unslopnemo-12b",
+	"undi95/remm-slerp-l2-13b",
+	"undi95/toppy-m-7b",
+	"undi95/toppy-m-7b:free",
+	"x-ai/grok-2-1212",
+	"x-ai/grok-2-vision-1212",
+	"x-ai/grok-beta",
+	"x-ai/grok-vision-beta",
+	"xwin-lm/xwin-lm-70b",
+}
@@ -2,16 +2,19 @@ package tencent
 
 import (
 	"errors"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
+	"github.com/songquanpeng/one-api/relay/relaymode"
 )
 
 // https://cloud.tencent.com/document/api/1729/101837
@@ -52,10 +55,18 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if err != nil {
 		return nil, err
 	}
-	tencentRequest := ConvertRequest(*request)
+	var convertedRequest any
+	switch relayMode {
+	case relaymode.Embeddings:
+		a.Action = "GetEmbedding"
+		convertedRequest = ConvertEmbeddingRequest(*request)
+	default:
+		a.Action = "ChatCompletions"
+		convertedRequest = ConvertRequest(*request)
+	}
 	// we have to calculate the sign here
-	a.Sign = GetSign(*tencentRequest, a, secretId, secretKey)
-	return tencentRequest, nil
+	a.Sign = GetSign(convertedRequest, a, secretId, secretKey)
+	return convertedRequest, nil
 }
 
 func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
@@ -75,7 +86,12 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
 		err, responseText = StreamHandler(c, resp)
 		usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
 	} else {
-		err, usage = Handler(c, resp)
+		switch meta.Mode {
+		case relaymode.Embeddings:
+			err, usage = EmbeddingHandler(c, resp)
+		default:
+			err, usage = Handler(c, resp)
+		}
 	}
 	return
 }
@@ -6,4 +6,5 @@ var ModelList = []string{
 	"hunyuan-standard-256K",
 	"hunyuan-pro",
 	"hunyuan-vision",
+	"hunyuan-embedding",
 }
@@ -8,7 +8,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"
 	"strconv"
@@ -16,11 +15,14 @@ import (
 	"time"
 
 	"github.com/gin-gonic/gin"
 
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/conv"
 	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/common/random"
+	"github.com/songquanpeng/one-api/common/render"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/model"
@@ -44,8 +46,68 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 	}
 }
 
+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
+	return &EmbeddingRequest{
+		InputList: request.ParseInput(),
+	}
+}
+
+func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	var tencentResponseP EmbeddingResponseP
+	err := json.NewDecoder(resp.Body).Decode(&tencentResponseP)
+	if err != nil {
+		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+
+	err = resp.Body.Close()
+	if err != nil {
+		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
+
+	tencentResponse := tencentResponseP.Response
+	if tencentResponse.Error.Code != "" {
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
+				Message: tencentResponse.Error.Message,
+				Code:    tencentResponse.Error.Code,
+			},
+			StatusCode: resp.StatusCode,
+		}, nil
+	}
+	requestModel := c.GetString(ctxkey.RequestModel)
+	fullTextResponse := embeddingResponseTencent2OpenAI(&tencentResponse)
+	fullTextResponse.Model = requestModel
+	jsonResponse, err := json.Marshal(fullTextResponse)
+	if err != nil {
+		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+	c.Writer.Header().Set("Content-Type", "application/json")
+	c.Writer.WriteHeader(resp.StatusCode)
+	_, err = c.Writer.Write(jsonResponse)
+	return nil, &fullTextResponse.Usage
+}
+
+func embeddingResponseTencent2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse {
+	openAIEmbeddingResponse := openai.EmbeddingResponse{
+		Object: "list",
+		Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Data)),
+		Model:  "hunyuan-embedding",
+		Usage:  model.Usage{TotalTokens: response.EmbeddingUsage.TotalTokens},
+	}
+
+	for _, item := range response.Data {
+		openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
+			Object:    item.Object,
+			Index:     item.Index,
+			Embedding: item.Embedding,
+		})
+	}
+	return &openAIEmbeddingResponse
+}
+
 func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
 	fullTextResponse := openai.TextResponse{
 		Id:      response.ReqID,
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Usage: model.Usage{
@@ -148,7 +210,7 @@ func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	TencentResponse = responseP.Response
-	if TencentResponse.Error.Code != 0 {
+	if TencentResponse.Error.Code != "" {
 		return &model.ErrorWithStatusCode{
 			Error: model.Error{
 				Message: TencentResponse.Error.Message,
@@ -195,7 +257,7 @@ func hmacSha256(s, key string) string {
 	return string(hashed.Sum(nil))
 }
 
-func GetSign(req ChatRequest, adaptor *Adaptor, secId, secKey string) string {
+func GetSign(req any, adaptor *Adaptor, secId, secKey string) string {
 	// build canonical request string
 	host := "hunyuan.tencentcloudapi.com"
 	httpRequestMethod := "POST"
@@ -35,16 +35,16 @@ type ChatRequest struct {
 	// 1. Affects the diversity of the generated text: larger values yield more diverse output.
 	// 2. Valid range is [0.0, 1.0]; when omitted, each model's recommended value is used.
 	// 3. Not recommended unless necessary; unreasonable values degrade the results.
-	TopP *float64 `json:"TopP"`
+	TopP *float64 `json:"TopP,omitempty"`
 	// Notes:
 	// 1. Higher values make the output more random, while lower values make it more focused and deterministic.
 	// 2. Valid range is [0.0, 2.0]; when omitted, each model's recommended value is used.
 	// 3. Not recommended unless necessary; unreasonable values degrade the results.
-	Temperature *float64 `json:"Temperature"`
+	Temperature *float64 `json:"Temperature,omitempty"`
 }
 
 type Error struct {
-	Code    int    `json:"Code"`
+	Code    string `json:"Code"`
 	Message string `json:"Message"`
 }
@@ -61,15 +61,41 @@ type ResponseChoices struct {
 }
 
 type ChatResponse struct {
-	Choices []ResponseChoices `json:"Choices,omitempty"` // results
-	Created int64             `json:"Created,omitempty"` // Unix timestamp as a string
-	Id      string            `json:"Id,omitempty"`      // session ID
-	Usage   Usage             `json:"Usage,omitempty"`   // token counts
-	Error   Error             `json:"Error,omitempty"`   // error info; note: this field may be null, meaning no valid value was returned
-	Note    string            `json:"Note,omitempty"`    // remark
-	ReqID   string            `json:"Req_id,omitempty"`  // unique request ID, returned with every request; quote it when reporting issues
+	Choices []ResponseChoices `json:"Choices,omitempty"`   // results
+	Created int64             `json:"Created,omitempty"`   // Unix timestamp as a string
+	Id      string            `json:"Id,omitempty"`        // session ID
+	Usage   Usage             `json:"Usage,omitempty"`     // token counts
+	Error   Error             `json:"Error,omitempty"`     // error info; note: this field may be null, meaning no valid value was returned
+	Note    string            `json:"Note,omitempty"`      // remark
+	ReqID   string            `json:"RequestId,omitempty"` // unique request ID, returned with every request; quote it when reporting issues
 }
 
 type ChatResponseP struct {
 	Response ChatResponse `json:"Response,omitempty"`
 }
 
+type EmbeddingRequest struct {
+	InputList []string `json:"InputList"`
+}
+
+type EmbeddingData struct {
+	Embedding []float64 `json:"Embedding"`
+	Index     int       `json:"Index"`
+	Object    string    `json:"Object"`
+}
+
+type EmbeddingUsage struct {
+	PromptTokens int `json:"PromptTokens"`
+	TotalTokens  int `json:"TotalTokens"`
+}
+
+type EmbeddingResponse struct {
+	Data           []EmbeddingData `json:"Data"`
+	EmbeddingUsage EmbeddingUsage  `json:"Usage,omitempty"`
+	RequestId      string          `json:"RequestId,omitempty"`
+	Error          Error           `json:"Error,omitempty"`
+}
+
+type EmbeddingResponseP struct {
+	Response EmbeddingResponse `json:"Response,omitempty"`
+}
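For reference, a decode sketch showing the wrapper/inner split these types expect; the payload values are invented:

package tencent

import (
	"encoding/json"
	"testing"
)

func TestEmbeddingResponseDecode(t *testing.T) {
	body := `{"Response":{"Data":[{"Embedding":[0.1,0.2],"Index":0,"Object":"embedding"}],` +
		`"Usage":{"PromptTokens":3,"TotalTokens":3},"RequestId":"req-123"}}`
	var p EmbeddingResponseP
	if err := json.Unmarshal([]byte(body), &p); err != nil {
		t.Fatal(err)
	}
	if p.Response.EmbeddingUsage.TotalTokens != 3 {
		t.Fatalf("unexpected usage: %d", p.Response.EmbeddingUsage.TotalTokens)
	}
}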
@@ -16,9 +16,12 @@ import (
 var ModelList = []string{
 	"gemini-pro", "gemini-pro-vision",
-	"gemini-1.5-pro-001", "gemini-1.5-flash-001",
-	"gemini-1.5-pro-002", "gemini-1.5-flash-002",
-	"gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp",
-	"gemini-exp-1206",
+	"gemini-1.5-pro-001", "gemini-1.5-pro-002",
+	"gemini-1.5-flash-001", "gemini-1.5-flash-002",
+	"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
+	"gemini-2.0-flash-lite-preview-02-05",
+	"gemini-2.0-flash-thinking-exp-01-21",
 }
 
 type Adaptor struct {
@@ -1,5 +1,14 @@
 package xai
 
+// https://console.x.ai/
+
 var ModelList = []string{
+	"grok-2",
+	"grok-vision-beta",
+	"grok-2-vision-1212",
+	"grok-2-vision",
+	"grok-2-vision-latest",
+	"grok-2-1212",
+	"grok-2-latest",
 	"grok-beta",
 }
@@ -1,12 +1,10 @@
 package xunfei
 
 var ModelList = []string{
-	"SparkDesk",
-	"SparkDesk-v1.1",
-	"SparkDesk-v2.1",
-	"SparkDesk-v3.1",
-	"SparkDesk-v3.1-128K",
-	"SparkDesk-v3.5",
-	"SparkDesk-v3.5-32K",
-	"SparkDesk-v4.0",
+	"Spark-Lite",
+	"Spark-Pro",
+	"Spark-Pro-128K",
+	"Spark-Max",
+	"Spark-Max-32K",
+	"Spark-4.0-Ultra",
 }
97  relay/adaptor/xunfei/domain.go  Normal file
@@ -0,0 +1,97 @@
+package xunfei
+
+import (
+	"fmt"
+	"strings"
+)
+
+// https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
+
+// Spark 4.0 Ultra request URL; the corresponding domain parameter is 4.0Ultra:
+//
+// wss://spark-api.xf-yun.com/v4.0/chat
+// Spark Max-32K request URL; the corresponding domain parameter is max-32k:
+//
+// wss://spark-api.xf-yun.com/chat/max-32k
+// Spark Max request URL; the corresponding domain parameter is generalv3.5:
+//
+// wss://spark-api.xf-yun.com/v3.5/chat
+// Spark Pro-128K request URL; the corresponding domain parameter is pro-128k:
+//
+// wss://spark-api.xf-yun.com/chat/pro-128k
+// Spark Pro request URL; the corresponding domain parameter is generalv3:
+//
+// wss://spark-api.xf-yun.com/v3.1/chat
+// Spark Lite request URL; the corresponding domain parameter is lite:
+//
+// wss://spark-api.xf-yun.com/v1.1/chat
+
+// Lite, Pro, Pro-128K, Max, Max-32K, and 4.0 Ultra
+
+func parseAPIVersionByModelName(modelName string) string {
+	apiVersion := modelName2APIVersion(modelName)
+	if apiVersion != "" {
+		return apiVersion
+	}
+
+	index := strings.IndexAny(modelName, "-")
+	if index != -1 {
+		return modelName[index+1:]
+	}
+	return ""
+}
+
+func modelName2APIVersion(modelName string) string {
+	switch modelName {
+	case "Spark-Lite":
+		return "v1.1"
+	case "Spark-Pro":
+		return "v3.1"
+	case "Spark-Pro-128K":
+		return "v3.1-128K"
+	case "Spark-Max":
+		return "v3.5"
+	case "Spark-Max-32K":
+		return "v3.5-32K"
+	case "Spark-4.0-Ultra":
+		return "v4.0"
+	}
+	return ""
+}
+
+// https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
+func apiVersion2domain(apiVersion string) string {
+	switch apiVersion {
+	case "v1.1":
+		return "lite"
+	case "v2.1":
+		return "generalv2"
+	case "v3.1":
+		return "generalv3"
+	case "v3.1-128K":
+		return "pro-128k"
+	case "v3.5":
+		return "generalv3.5"
+	case "v3.5-32K":
+		return "max-32k"
+	case "v4.0":
+		return "4.0Ultra"
+	}
+	return "general" + apiVersion
+}
+
+func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (string, string) {
+	var authUrl string
+	domain := apiVersion2domain(apiVersion)
+	switch apiVersion {
+	case "v3.1-128K":
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret)
+		break
+	case "v3.5-32K":
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret)
+		break
+	default:
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
+	}
+	return domain, authUrl
+}
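A small test sketch tracing the two-step mapping (model name to API version to domain); it assumes placement in the xunfei package alongside the helpers above:

package xunfei

import "testing"

func TestSparkDomainMapping(t *testing.T) {
	if v := parseAPIVersionByModelName("Spark-Max-32K"); v != "v3.5-32K" {
		t.Fatalf("api version: got %q", v)
	}
	if d := apiVersion2domain("v3.5-32K"); d != "max-32k" {
		t.Fatalf("domain: got %q", d)
	}
	// Legacy names still fall through the "-" split: "SparkDesk-v3.5" -> "v3.5".
	if v := parseAPIVersionByModelName("SparkDesk-v3.5"); v != "v3.5" {
		t.Fatalf("legacy api version: got %q", v)
	}
}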
@@ -15,6 +15,7 @@ import (
 
 	"github.com/gin-gonic/gin"
 	"github.com/gorilla/websocket"
+
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
@@ -270,48 +271,3 @@ func xunfeiMakeRequest(textRequest model.GeneralOpenAIRequest, domain, authUrl,
 
 	return dataChan, stopChan, nil
 }
-
-func parseAPIVersionByModelName(modelName string) string {
-	index := strings.IndexAny(modelName, "-")
-	if index != -1 {
-		return modelName[index+1:]
-	}
-	return ""
-}
-
-// https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
-func apiVersion2domain(apiVersion string) string {
-	switch apiVersion {
-	case "v1.1":
-		return "lite"
-	case "v2.1":
-		return "generalv2"
-	case "v3.1":
-		return "generalv3"
-	case "v3.1-128K":
-		return "pro-128k"
-	case "v3.5":
-		return "generalv3.5"
-	case "v3.5-32K":
-		return "max-32k"
-	case "v4.0":
-		return "4.0Ultra"
-	}
-	return "general" + apiVersion
-}
-
-func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (string, string) {
-	var authUrl string
-	domain := apiVersion2domain(apiVersion)
-	switch apiVersion {
-	case "v3.1-128K":
-		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret)
-		break
-	case "v3.5-32K":
-		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret)
-		break
-	default:
-		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
-	}
-	return domain, authUrl
-}
12  relay/adaptor/xunfeiv2/constants.go  Normal file
@@ -0,0 +1,12 @@
+package xunfeiv2
+
+// https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_3-%E8%AF%B7%E6%B1%82%E8%AF%B4%E6%98%8E
+
+var ModelList = []string{
+	"lite",
+	"generalv3",
+	"pro-128k",
+	"generalv3.5",
+	"max-32k",
+	"4.0Ultra",
+}
@@ -1,7 +1,14 @@
 package zhipu
 
+// https://open.bigmodel.cn/pricing
+
 var ModelList = []string{
-	"chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite",
-	"glm-4", "glm-4v", "glm-3-turbo", "embedding-2",
-	"cogview-3",
+	"glm-zero-preview", "glm-4-plus", "glm-4-0520", "glm-4-airx",
+	"glm-4-air", "glm-4-long", "glm-4-flashx", "glm-4-flash",
+	"glm-4", "glm-3-turbo",
+	"glm-4v-plus", "glm-4v", "glm-4v-flash",
+	"cogview-3-plus", "cogview-3", "cogview-3-flash",
+	"cogviewx", "cogviewx-flash",
+	"charglm-4", "emohaa", "codegeex-4",
+	"embedding-2", "embedding-3",
 }