Merge remote-tracking branch 'origin/upstream/main'

Laisky.Cai committed 2023-12-18 02:10:13 +00:00
19 changed files with 706 additions and 256 deletions

View File

@@ -60,7 +60,7 @@ _✨ Access all LLM through the standard OpenAI API format, easy to deploy & use
1. Support for multiple large models:
   + [x] [OpenAI ChatGPT Series Models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (Supports [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
   + [x] [Anthropic Claude Series Models](https://anthropic.com)
-  + [x] [Google PaLM2 Series Models](https://developers.generativeai.google)
+  + [x] [Google PaLM2 and Gemini Series Models](https://developers.generativeai.google)
   + [x] [Baidu Wenxin Yiyuan Series Models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
   + [x] [Alibaba Tongyi Qianwen Series Models](https://help.aliyun.com/document_detail/2400395.html)
   + [x] [Zhipu ChatGLM Series Models](https://bigmodel.cn)

View File

@@ -60,7 +60,7 @@ _✨ 標準的な OpenAI API フォーマットを通じてすべての LLM に
1. 複数の大型モデルをサポート:
   + [x] [OpenAI ChatGPT シリーズモデル](https://platform.openai.com/docs/guides/gpt/chat-completions-api) ([Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) をサポート)
   + [x] [Anthropic Claude シリーズモデル](https://anthropic.com)
-  + [x] [Google PaLM2 シリーズモデル](https://developers.generativeai.google)
+  + [x] [Google PaLM2/Gemini シリーズモデル](https://developers.generativeai.google)
   + [x] [Baidu Wenxin Yiyuan シリーズモデル](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
   + [x] [Alibaba Tongyi Qianwen シリーズモデル](https://help.aliyun.com/document_detail/2400395.html)
   + [x] [Zhipu ChatGLM シリーズモデル](https://bigmodel.cn)

View File

@@ -187,6 +187,7 @@ const (
    ChannelTypeAIProxyLibrary = 21
    ChannelTypeFastGPT        = 22
    ChannelTypeTencent        = 23
    ChannelTypeGemini         = 24
)

var ChannelBaseURLs = []string{
@@ -214,4 +215,5 @@ var ChannelBaseURLs = []string{
"https://api.aiproxy.io", // 21 "https://api.aiproxy.io", // 21
"https://fastgpt.run/api/openapi", // 22 "https://fastgpt.run/api/openapi", // 22
"https://hunyuan.cloud.tencent.com", //23 "https://hunyuan.cloud.tencent.com", //23
"", //24
} }

View File

@@ -4,3 +4,4 @@ var UsingSQLite = false
var UsingPostgreSQL = false
var SQLitePath = "one-api.db"
var SQLiteBusyTimeout = GetOrDefault("SQLITE_BUSY_TIMEOUT", 3000)

View File

@@ -83,12 +83,15 @@ var ModelRatio = map[string]float64{
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens "ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens "Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1, "PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens "chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens "chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens "chatglm_std": 0.3572, // ¥0.005 / 1k tokens
"chatglm_lite": 0.1429, // ¥0.002 / 1k tokens "chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
"qwen-turbo": 0.8572, // ¥0.012 / 1k tokens "qwen-turbo": 0.5715, // ¥0.008 / 1k tokens // https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
"qwen-plus": 10, // ¥0.14 / 1k tokens "qwen-plus": 1.4286, // ¥0.02 / 1k tokens
"qwen-max": 1.4286, // ¥0.02 / 1k tokens
"qwen-max-longcontext": 1.4286, // ¥0.02 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens "text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens "SparkDesk": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens "360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens

View File

@@ -20,6 +20,8 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openai
    switch channel.Type {
    case common.ChannelTypePaLM:
        fallthrough
    case common.ChannelTypeGemini:
        fallthrough
    case common.ChannelTypeAnthropic:
        fallthrough
    case common.ChannelTypeBaidu:

View File

@@ -423,6 +423,15 @@ func init() {
Root: "PaLM-2", Root: "PaLM-2",
Parent: nil, Parent: nil,
}, },
{
Id: "gemini-pro",
Object: "model",
Created: 1677649963,
OwnedBy: "google",
Permission: permission,
Root: "gemini-pro",
Parent: nil,
},
    {
        Id: "chatglm_turbo",
        Object: "model",
@@ -477,6 +486,24 @@ func init() {
Root: "qwen-plus", Root: "qwen-plus",
Parent: nil, Parent: nil,
}, },
{
Id: "qwen-max",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max",
Parent: nil,
},
{
Id: "qwen-max-longcontext",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max-longcontext",
Parent: nil,
},
    {
        Id: "text-embedding-v1",
        Object: "model",

View File

@@ -13,13 +13,13 @@ package controller
// // https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r
// type AliMessage struct {
-// 	User string `json:"user"`
+// 	Content string `json:"content"`
-// 	Bot string `json:"bot"`
+// 	Role string `json:"role"`
// }
// type AliInput struct {
-// 	Prompt string `json:"prompt"`
+// 	//Prompt string `json:"prompt"`
-// 	History []AliMessage `json:"history"`
+// 	Messages []AliMessage `json:"messages"`
// }
// type AliParameters struct {

controller/relay-gemini.go (new file, 305 lines)
View File

@@ -0,0 +1,305 @@
package controller
import (
"bufio"
"encoding/json"
"fmt"
"io"
"net/http"
"one-api/common"
"strings"
"github.com/gin-gonic/gin"
)
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
Tools []GeminiChatTools `json:"tools,omitempty"`
}
type GeminiInlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type GeminiPart struct {
Text string `json:"text,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}
type GeminiChatContent struct {
Role string `json:"role,omitempty"`
Parts []GeminiPart `json:"parts"`
}
type GeminiChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type GeminiChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type GeminiChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
// Setting safety to the lowest possible values since Gemini is already powerless enough
func requestOpenAI2Gemini(textRequest GeneralOpenAIRequest) *GeminiChatRequest {
geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
//SafetySettings: []GeminiChatSafetySettings{
// {
// Category: "HARM_CATEGORY_HARASSMENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_HATE_SPEECH",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
//},
GenerationConfig: GeminiChatGenerationConfig{
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
MaxOutputTokens: textRequest.MaxTokens,
},
}
if textRequest.Functions != nil {
geminiRequest.Tools = []GeminiChatTools{
{
FunctionDeclarations: textRequest.Functions,
},
}
}
shouldAddDummyModelMessage := false
for _, message := range textRequest.Messages {
content := GeminiChatContent{
Role: message.Role,
Parts: []GeminiPart{
{
Text: message.StringContent(),
},
},
}
// there's no assistant role in gemini and API shall vomit if Role is not user or model
if content.Role == "assistant" {
content.Role = "model"
}
// Converting system prompt to prompt from user for the same reason
if content.Role == "system" {
content.Role = "user"
shouldAddDummyModelMessage = true
}
geminiRequest.Contents = append(geminiRequest.Contents, content)
// If a system message is the last message, we need to add a dummy model message to make gemini happy
if shouldAddDummyModelMessage {
geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
Role: "model",
Parts: []GeminiPart{
{
Text: "Okay",
},
},
})
shouldAddDummyModelMessage = false
}
}
return &geminiRequest
}
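To make the mapping above concrete, here is a minimal standalone sketch (trimmed copies of the structs defined in this file, with made-up message text) of the contents array that requestOpenAI2Gemini produces for a system + user conversation: the system prompt is re-labelled as a user turn and followed by a dummy "Okay" model turn, since Gemini only accepts the user and model roles.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copies of the request types defined above, just for this demo.
type GeminiPart struct {
    Text string `json:"text,omitempty"`
}
type GeminiChatContent struct {
    Role  string       `json:"role,omitempty"`
    Parts []GeminiPart `json:"parts"`
}

func main() {
    // What requestOpenAI2Gemini builds for {system: "You are helpful.", user: "Hello!"}.
    contents := []GeminiChatContent{
        {Role: "user", Parts: []GeminiPart{{Text: "You are helpful."}}}, // system prompt, re-labelled as a user turn
        {Role: "model", Parts: []GeminiPart{{Text: "Okay"}}},            // dummy turn keeping user/model alternation
        {Role: "user", Parts: []GeminiPart{{Text: "Hello!"}}},
    }
    out, _ := json.MarshalIndent(contents, "", "  ")
    fmt.Println(string(out))
}
```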
type GeminiChatResponse struct {
Candidates []GeminiChatCandidate `json:"candidates"`
PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
}
func (g *GeminiChatResponse) GetResponseText() string {
if g == nil {
return ""
}
if len(g.Candidates) > 0 && len(g.Candidates[0].Content.Parts) > 0 {
return g.Candidates[0].Content.Parts[0].Text
}
return ""
}
type GeminiChatCandidate struct {
Content GeminiChatContent `json:"content"`
FinishReason string `json:"finishReason"`
Index int64 `json:"index"`
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}
type GeminiChatSafetyRating struct {
Category string `json:"category"`
Probability string `json:"probability"`
}
type GeminiChatPromptFeedback struct {
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}
func responseGeminiChat2OpenAI(response *GeminiChatResponse) *OpenAITextResponse {
fullTextResponse := OpenAITextResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion",
Created: common.GetTimestamp(),
Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
}
for i, candidate := range response.Candidates {
choice := OpenAITextResponseChoice{
Index: i,
Message: Message{
Role: "assistant",
Content: "",
},
FinishReason: stopFinishReason,
}
if len(candidate.Content.Parts) > 0 {
choice.Message.Content = candidate.Content.Parts[0].Text
}
fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
}
return &fullTextResponse
}
func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *ChatCompletionsStreamResponse {
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = geminiResponse.GetResponseText()
choice.FinishReason = &stopFinishReason
var response ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = "gemini"
response.Choices = []ChatCompletionsStreamResponseChoice{choice}
return &response
}
func geminiChatStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
dataChan := make(chan string)
stopChan := make(chan bool)
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
go func() {
for scanner.Scan() {
data := scanner.Text()
data = strings.TrimSpace(data)
if !strings.HasPrefix(data, "\"text\": \"") {
continue
}
data = strings.TrimPrefix(data, "\"text\": \"")
data = strings.TrimSuffix(data, "\"")
dataChan <- data
}
stopChan <- true
}()
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// this is used to prevent annoying \ related format bug
data = fmt.Sprintf("{\"content\": \"%s\"}", data)
type dummyStruct struct {
Content string `json:"content"`
}
var dummy dummyStruct
err := json.Unmarshal([]byte(data), &dummy)
responseText += dummy.Content
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = dummy.Content
response := ChatCompletionsStreamResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion.chunk",
Created: common.GetTimestamp(),
Model: "gemini-pro",
Choices: []ChatCompletionsStreamResponseChoice{choice},
}
jsonResponse, err := json.Marshal(response)
if err != nil {
common.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false
}
})
err := resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}
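The prefix-scanning above leans on the shape of streamGenerateContent output when alt=sse is not requested: a pretty-printed JSON array in which every text field sits on its own line. The payload in the sketch below is a hand-written approximation of that format (an assumption, not a captured response), used only to show the trimming logic in isolation.

```go
package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    // Hand-written stand-in for a streamGenerateContent body: each "text"
    // field lands on its own line, which is all the scanner above relies on.
    sample := `[{
  "candidates": [{
    "content": {
      "parts": [{
        "text": "Hello"
      }],
      "role": "model"
    }
  }]
},
{
  "candidates": [{
    "content": {
      "parts": [{
        "text": " world"
      }],
      "role": "model"
    }
  }]
}]`

    scanner := bufio.NewScanner(strings.NewReader(sample))
    for scanner.Scan() {
        line := strings.TrimSpace(scanner.Text())
        if !strings.HasPrefix(line, `"text": "`) {
            continue
        }
        line = strings.TrimSuffix(strings.TrimPrefix(line, `"text": "`), `"`)
        fmt.Println(line) // prints: Hello, then " world"
    }
}
```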
func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var geminiResponse GeminiChatResponse
err = json.Unmarshal(responseBody, &geminiResponse)
if err != nil {
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if len(geminiResponse.Candidates) == 0 {
return &OpenAIErrorWithStatusCode{
OpenAIError: OpenAIError{
Message: "No candidates returned",
Type: "server_error",
Param: "",
Code: 500,
},
StatusCode: resp.StatusCode,
}, nil
}
fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
completionTokens := countTokenText(geminiResponse.GetResponseText(), model)
usage := Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
}
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
_, err = c.Writer.Write(jsonResponse)
return nil, &usage
}

View File

@@ -19,7 +19,6 @@ func isWithinRange(element string, value int) bool {
    if _, ok := common.DalleGenerationImageAmounts[element]; !ok {
        return false
    }
    min := common.DalleGenerationImageAmounts[element][0]
    max := common.DalleGenerationImageAmounts[element][1]
@@ -42,6 +41,10 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest) return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
} }
if imageRequest.N == 0 {
imageRequest.N = 1
}
// Size validation // Size validation
if imageRequest.Size != "" { if imageRequest.Size != "" {
imageSize = imageRequest.Size imageSize = imageRequest.Size
@@ -79,7 +82,10 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
    // Number of generated images validation
    if isWithinRange(imageModel, imageRequest.N) == false {
-       return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+       // channel not azure
+       if channelType != common.ChannelTypeAzure {
+           return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+       }
    }
    // map model name
@@ -102,7 +108,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
        baseURL = c.GetString("base_url")
    }
    fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
-   if channelType == common.ChannelTypeAzure && relayMode == RelayModeImagesGenerations {
+   if channelType == common.ChannelTypeAzure {
        // https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
        apiVersion := GetAPIVersion(c)
        // https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-06-01-preview

View File

@@ -1,205 +1,205 @@
package controller
-// import (
+import (
-// "encoding/json"
+"encoding/json"
-// "fmt"
+"fmt"
-// "github.com/gin-gonic/gin"
+"github.com/gin-gonic/gin"
-// "io"
+"io"
-// "net/http"
+"net/http"
-// "one-api/common"
+"one-api/common"
-// )
+)
-// // https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
+// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
-// // https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body
+// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body
-// type PaLMChatMessage struct {
+type PaLMChatMessage struct {
-// Author string `json:"author"`
+Author string `json:"author"`
-// Content string `json:"content"`
+Content string `json:"content"`
-// }
+}
-// type PaLMFilter struct {
+type PaLMFilter struct {
-// Reason string `json:"reason"`
+Reason string `json:"reason"`
-// Message string `json:"message"`
+Message string `json:"message"`
-// }
+}
-// type PaLMPrompt struct {
+type PaLMPrompt struct {
-// Messages []PaLMChatMessage `json:"messages"`
+Messages []PaLMChatMessage `json:"messages"`
-// }
+}
-// type PaLMChatRequest struct {
+type PaLMChatRequest struct {
-// Prompt PaLMPrompt `json:"prompt"`
+Prompt PaLMPrompt `json:"prompt"`
-// Temperature float64 `json:"temperature,omitempty"`
+Temperature float64 `json:"temperature,omitempty"`
-// CandidateCount int `json:"candidateCount,omitempty"`
+CandidateCount int `json:"candidateCount,omitempty"`
-// TopP float64 `json:"topP,omitempty"`
+TopP float64 `json:"topP,omitempty"`
-// TopK int `json:"topK,omitempty"`
+TopK int `json:"topK,omitempty"`
-// }
+}
-// type PaLMError struct {
+type PaLMError struct {
-// Code int `json:"code"`
+Code int `json:"code"`
-// Message string `json:"message"`
+Message string `json:"message"`
-// Status string `json:"status"`
+Status string `json:"status"`
-// }
+}
-// type PaLMChatResponse struct {
+type PaLMChatResponse struct {
-// Candidates []PaLMChatMessage `json:"candidates"`
+Candidates []PaLMChatMessage `json:"candidates"`
-// Messages []Message `json:"messages"`
+Messages []Message `json:"messages"`
-// Filters []PaLMFilter `json:"filters"`
+Filters []PaLMFilter `json:"filters"`
-// Error PaLMError `json:"error"`
+Error PaLMError `json:"error"`
-// }
+}
-// func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest {
+func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest {
-// palmRequest := PaLMChatRequest{
+palmRequest := PaLMChatRequest{
-// Prompt: PaLMPrompt{
+Prompt: PaLMPrompt{
-// Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)),
+Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)),
-// },
+},
-// Temperature: textRequest.Temperature,
+Temperature: textRequest.Temperature,
-// CandidateCount: textRequest.N,
+CandidateCount: textRequest.N,
-// TopP: textRequest.TopP,
+TopP: textRequest.TopP,
-// TopK: textRequest.MaxTokens,
+TopK: textRequest.MaxTokens,
-// }
+}
-// for _, message := range textRequest.Messages {
+for _, message := range textRequest.Messages {
-// palmMessage := PaLMChatMessage{
+palmMessage := PaLMChatMessage{
-// Content: message.Content,
+Content: message.StringContent(),
-// }
+}
-// if message.Role == "user" {
+if message.Role == "user" {
-// palmMessage.Author = "0"
+palmMessage.Author = "0"
-// } else {
+} else {
-// palmMessage.Author = "1"
+palmMessage.Author = "1"
-// }
+}
-// palmRequest.Prompt.Messages = append(palmRequest.Prompt.Messages, palmMessage)
+palmRequest.Prompt.Messages = append(palmRequest.Prompt.Messages, palmMessage)
-// }
+}
-// return &palmRequest
+return &palmRequest
-// }
+}
-// func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse {
+func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse {
-// fullTextResponse := OpenAITextResponse{
+fullTextResponse := OpenAITextResponse{
-// Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
+Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
-// }
+}
-// for i, candidate := range response.Candidates {
+for i, candidate := range response.Candidates {
-// choice := OpenAITextResponseChoice{
+choice := OpenAITextResponseChoice{
-// Index: i,
+Index: i,
-// Message: Message{
+Message: Message{
-// Role: "assistant",
+Role: "assistant",
-// Content: candidate.Content,
+Content: candidate.Content,
-// },
+},
-// FinishReason: "stop",
+FinishReason: "stop",
-// }
+}
-// fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
+fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
-// }
+}
-// return &fullTextResponse
+return &fullTextResponse
-// }
+}
-// func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsStreamResponse {
+func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsStreamResponse {
-// var choice ChatCompletionsStreamResponseChoice
+var choice ChatCompletionsStreamResponseChoice
-// if len(palmResponse.Candidates) > 0 {
+if len(palmResponse.Candidates) > 0 {
-// choice.Delta.Content = palmResponse.Candidates[0].Content
+choice.Delta.Content = palmResponse.Candidates[0].Content
-// }
+}
-// choice.FinishReason = &stopFinishReason
+choice.FinishReason = &stopFinishReason
-// var response ChatCompletionsStreamResponse
+var response ChatCompletionsStreamResponse
-// response.Object = "chat.completion.chunk"
+response.Object = "chat.completion.chunk"
-// response.Model = "palm2"
+response.Model = "palm2"
-// response.Choices = []ChatCompletionsStreamResponseChoice{choice}
+response.Choices = []ChatCompletionsStreamResponseChoice{choice}
-// return &response
+return &response
-// }
+}
-// func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
+func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
-// responseText := ""
+responseText := ""
-// responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
+responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
-// createdTime := common.GetTimestamp()
+createdTime := common.GetTimestamp()
-// dataChan := make(chan string)
+dataChan := make(chan string)
-// stopChan := make(chan bool)
+stopChan := make(chan bool)
-// go func() {
+go func() {
-// responseBody, err := io.ReadAll(resp.Body)
+responseBody, err := io.ReadAll(resp.Body)
-// if err != nil {
+if err != nil {
-// common.SysError("error reading stream response: " + err.Error())
+common.SysError("error reading stream response: " + err.Error())
-// stopChan <- true
+stopChan <- true
-// return
+return
-// }
+}
-// err = resp.Body.Close()
+err = resp.Body.Close()
-// if err != nil {
+if err != nil {
-// common.SysError("error closing stream response: " + err.Error())
+common.SysError("error closing stream response: " + err.Error())
-// stopChan <- true
+stopChan <- true
-// return
+return
-// }
+}
-// var palmResponse PaLMChatResponse
+var palmResponse PaLMChatResponse
-// err = json.Unmarshal(responseBody, &palmResponse)
+err = json.Unmarshal(responseBody, &palmResponse)
-// if err != nil {
+if err != nil {
-// common.SysError("error unmarshalling stream response: " + err.Error())
+common.SysError("error unmarshalling stream response: " + err.Error())
-// stopChan <- true
+stopChan <- true
-// return
+return
-// }
+}
-// fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse)
+fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse)
-// fullTextResponse.Id = responseId
+fullTextResponse.Id = responseId
-// fullTextResponse.Created = createdTime
+fullTextResponse.Created = createdTime
-// if len(palmResponse.Candidates) > 0 {
+if len(palmResponse.Candidates) > 0 {
-// responseText = palmResponse.Candidates[0].Content
+responseText = palmResponse.Candidates[0].Content
-// }
+}
-// jsonResponse, err := json.Marshal(fullTextResponse)
+jsonResponse, err := json.Marshal(fullTextResponse)
-// if err != nil {
+if err != nil {
-// common.SysError("error marshalling stream response: " + err.Error())
+common.SysError("error marshalling stream response: " + err.Error())
-// stopChan <- true
+stopChan <- true
-// return
+return
-// }
+}
-// dataChan <- string(jsonResponse)
+dataChan <- string(jsonResponse)
-// stopChan <- true
+stopChan <- true
-// }()
+}()
-// setEventStreamHeaders(c)
+setEventStreamHeaders(c)
-// c.Stream(func(w io.Writer) bool {
+c.Stream(func(w io.Writer) bool {
-// select {
+select {
-// case data := <-dataChan:
+case data := <-dataChan:
-// c.Render(-1, common.CustomEvent{Data: "data: " + data})
+c.Render(-1, common.CustomEvent{Data: "data: " + data})
-// return true
+return true
-// case <-stopChan:
+case <-stopChan:
-// c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
+c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
-// return false
+return false
-// }
+}
-// })
+})
-// err := resp.Body.Close()
+err := resp.Body.Close()
-// if err != nil {
+if err != nil {
-// return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
+return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
-// }
+}
-// return nil, responseText
+return nil, responseText
-// }
+}
-// func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
+func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
-// responseBody, err := io.ReadAll(resp.Body)
+responseBody, err := io.ReadAll(resp.Body)
-// if err != nil {
+if err != nil {
-// return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-// }
+}
-// err = resp.Body.Close()
+err = resp.Body.Close()
-// if err != nil {
+if err != nil {
-// return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-// }
+}
-// var palmResponse PaLMChatResponse
+var palmResponse PaLMChatResponse
-// err = json.Unmarshal(responseBody, &palmResponse)
+err = json.Unmarshal(responseBody, &palmResponse)
-// if err != nil {
+if err != nil {
-// return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-// }
+}
-// if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
+if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
-// return &OpenAIErrorWithStatusCode{
+return &OpenAIErrorWithStatusCode{
-// OpenAIError: OpenAIError{
+OpenAIError: OpenAIError{
-// Message: palmResponse.Error.Message,
+Message: palmResponse.Error.Message,
-// Type: palmResponse.Error.Status,
+Type: palmResponse.Error.Status,
-// Param: "",
+Param: "",
-// Code: palmResponse.Error.Code,
+Code: palmResponse.Error.Code,
-// },
+},
-// StatusCode: resp.StatusCode,
+StatusCode: resp.StatusCode,
-// }, nil
+}, nil
-// }
+}
-// fullTextResponse := responsePaLM2OpenAI(&palmResponse)
+fullTextResponse := responsePaLM2OpenAI(&palmResponse)
-// completionTokens := countTokenText(palmResponse.Candidates[0].Content, model)
+completionTokens := countTokenText(palmResponse.Candidates[0].Content, model)
-// usage := Usage{
+usage := Usage{
-// PromptTokens: promptTokens,
+PromptTokens: promptTokens,
-// CompletionTokens: completionTokens,
+CompletionTokens: completionTokens,
-// TotalTokens: promptTokens + completionTokens,
+TotalTokens: promptTokens + completionTokens,
-// }
+}
-// fullTextResponse.Usage = usage
+fullTextResponse.Usage = usage
-// jsonResponse, err := json.Marshal(fullTextResponse)
+jsonResponse, err := json.Marshal(fullTextResponse)
-// if err != nil {
+if err != nil {
-// return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
-// }
+}
-// c.Writer.Header().Set("Content-Type", "application/json")
+c.Writer.Header().Set("Content-Type", "application/json")
-// c.Writer.WriteHeader(resp.StatusCode)
+c.Writer.WriteHeader(resp.StatusCode)
-// _, err = c.Writer.Write(jsonResponse)
+_, err = c.Writer.Write(jsonResponse)
-// return nil, &usage
+return nil, &usage
-// }
+}

View File

@@ -27,6 +27,7 @@ const (
    APITypeXunfei
    APITypeAIProxyLibrary
    APITypeTencent
    APITypeGemini
)

var httpClient *http.Client
@@ -118,6 +119,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
        apiType = APITypeAIProxyLibrary
    case common.ChannelTypeTencent:
        apiType = APITypeTencent
    case common.ChannelTypeGemini:
        apiType = APITypeGemini
    }
    baseURL := common.ChannelBaseURLs[channelType]
    requestURL := c.Request.URL.String()
@@ -177,21 +180,38 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        fullRequestURL += "?key=" + apiKey
-   // case APITypeZhipu:
-   // 	method := "invoke"
-   // 	if textRequest.Stream {
-   // 		method = "sse-invoke"
-   // 	}
-   // 	fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
-   // case APITypeAli:
-   // 	fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
-   // 	if relayMode == RelayModeEmbeddings {
-   // 		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
-   // 	}
-   // case APITypeTencent:
-   // 	fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
-   case APITypeAIProxyLibrary:
-       fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
+   case APITypeGemini:
+       requestBaseURL := "https://generativelanguage.googleapis.com"
+       if baseURL != "" {
+           requestBaseURL = baseURL
+       }
+       version := "v1"
+       if c.GetString("api_version") != "" {
+           version = c.GetString("api_version")
+       }
+       action := "generateContent"
+       if textRequest.Stream {
+           action = "streamGenerateContent"
+       }
+       fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
+       apiKey := c.Request.Header.Get("Authorization")
+       apiKey = strings.TrimPrefix(apiKey, "Bearer ")
+       fullRequestURL += "?key=" + apiKey
+   // case APITypeZhipu:
+   // 	method := "invoke"
+   // 	if textRequest.Stream {
+   // 		method = "sse-invoke"
+   // 	}
+   // 	fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
+   // case APITypeAli:
+   // 	fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
+   // 	if relayMode == RelayModeEmbeddings {
+   // 		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
+   // 	}
+   // case APITypeTencent:
+   // 	fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
+   // case APITypeAIProxyLibrary:
+   // 	fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
    }
    var promptTokens int
    var completionTokens int
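Putting the APITypeGemini branch together: a gemini-pro call ends up at generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent (or :streamGenerateContent when streaming), with the channel key passed as a key query parameter instead of an Authorization header. A small sketch of the URL assembly; the key value is a placeholder:

```go
package main

import "fmt"

func main() {
    // Mirrors the APITypeGemini URL construction above; "MY_GEMINI_KEY" is a placeholder.
    requestBaseURL := "https://generativelanguage.googleapis.com"
    version := "v1"
    model := "gemini-pro"
    action := "generateContent"
    stream := true
    if stream {
        action = "streamGenerateContent"
    }
    fullRequestURL := fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, model, action)
    fullRequestURL += "?key=" + "MY_GEMINI_KEY"
    fmt.Println(fullRequestURL)
    // https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?key=MY_GEMINI_KEY
}
```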
@@ -281,13 +301,20 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    // 	return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
    // }
    // requestBody = bytes.NewBuffer(jsonData)
-   // case APITypePaLM:
-   // 	palmRequest := requestOpenAI2PaLM(textRequest)
-   // 	jsonStr, err := json.Marshal(palmRequest)
-   // 	if err != nil {
-   // 		return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
-   // 	}
-   // 	requestBody = bytes.NewBuffer(jsonStr)
+   case APITypePaLM:
+       palmRequest := requestOpenAI2PaLM(textRequest)
+       jsonStr, err := json.Marshal(palmRequest)
+       if err != nil {
+           return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
+       }
+       requestBody = bytes.NewBuffer(jsonStr)
+   case APITypeGemini:
+       geminiChatRequest := requestOpenAI2Gemini(textRequest)
+       jsonStr, err := json.Marshal(geminiChatRequest)
+       if err != nil {
+           return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
+       }
+       requestBody = bytes.NewBuffer(jsonStr)
    // case APITypeZhipu:
    // 	zhipuRequest := requestOpenAI2Zhipu(textRequest)
    // 	jsonStr, err := json.Marshal(zhipuRequest)
@@ -381,6 +408,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
        // req.Header.Set("Authorization", apiKey)
    case APITypePaLM:
        // do not set Authorization header
    case APITypeGemini:
        // do not set Authorization header
    default:
        req.Header.Set("Authorization", "Bearer "+apiKey)
    }
@@ -522,25 +551,44 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    // 	}
    // 	return nil
    // }
-   // case APITypePaLM:
-   // 	if textRequest.Stream { // PaLM2 API does not support stream
-   // 		err, responseText := palmStreamHandler(c, resp)
-   // 		if err != nil {
-   // 			return err
-   // 		}
-   // 		textResponse.Usage.PromptTokens = promptTokens
-   // 		textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
-   // 		return nil
-   // 	} else {
-   // 		err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
-   // 		if err != nil {
-   // 			return err
-   // 		}
-   // 		if usage != nil {
-   // 			textResponse.Usage = *usage
-   // 		}
-   // 		return nil
-   // 	}
+   case APITypePaLM:
+       if textRequest.Stream { // PaLM2 API does not support stream
+           err, responseText := palmStreamHandler(c, resp)
+           if err != nil {
+               return err
+           }
+           textResponse.Usage.PromptTokens = promptTokens
+           textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
+           return nil
+       } else {
+           err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
+           if err != nil {
+               return err
+           }
+           if usage != nil {
+               textResponse.Usage = *usage
+           }
+           return nil
+       }
case APITypeGemini:
if textRequest.Stream {
err, responseText := geminiChatStreamHandler(c, resp)
if err != nil {
return err
}
textResponse.Usage.PromptTokens = promptTokens
textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
return nil
} else {
err, usage := geminiChatHandler(c, resp, promptTokens, textRequest.Model)
if err != nil {
return err
}
if usage != nil {
textResponse.Usage = *usage
}
return nil
}
    // case APITypeZhipu:
    // 	if isStream {
    // 		err, usage := zhipuStreamHandler(c, resp)

View File

@@ -357,11 +357,52 @@ func setEventStreamHeaders(c *gin.Context) {
    c.Writer.Header().Set("X-Accel-Buffering", "no")
}
type GeneralErrorResponse struct {
Error OpenAIError `json:"error"`
Message string `json:"message"`
Msg string `json:"msg"`
Err string `json:"err"`
ErrorMsg string `json:"error_msg"`
Header struct {
Message string `json:"message"`
} `json:"header"`
Response struct {
Error struct {
Message string `json:"message"`
} `json:"error"`
} `json:"response"`
}
func (e GeneralErrorResponse) ToMessage() string {
if e.Error.Message != "" {
return e.Error.Message
}
if e.Message != "" {
return e.Message
}
if e.Msg != "" {
return e.Msg
}
if e.Err != "" {
return e.Err
}
if e.ErrorMsg != "" {
return e.ErrorMsg
}
if e.Header.Message != "" {
return e.Header.Message
}
if e.Response.Error.Message != "" {
return e.Response.Error.Message
}
return ""
}
func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
    openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
        StatusCode: resp.StatusCode,
        OpenAIError: OpenAIError{
-           Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
+           Message: "",
            Type: "upstream_error",
            Code: "bad_response_status_code",
            Param: strconv.Itoa(resp.StatusCode),
@@ -375,12 +416,20 @@ func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIEr
    if err != nil {
        return
    }
-   var textResponse TextResponse
-   err = json.Unmarshal(responseBody, &textResponse)
+   var errResponse GeneralErrorResponse
+   err = json.Unmarshal(responseBody, &errResponse)
    if err != nil {
        return
    }
-   openAIErrorWithStatusCode.OpenAIError = textResponse.Error
+   if errResponse.Error.Message != "" {
+       // OpenAI format error, so we override the default one
+       openAIErrorWithStatusCode.OpenAIError = errResponse.Error
+   } else {
+       openAIErrorWithStatusCode.OpenAIError.Message = errResponse.ToMessage()
+   }
+   if openAIErrorWithStatusCode.OpenAIError.Message == "" {
+       openAIErrorWithStatusCode.OpenAIError.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
+   }
    return
}
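A short sketch of how ToMessage flattens the various upstream error shapes into a single string; the struct below is a trimmed copy of the GeneralErrorResponse added above, and the sample payloads are invented stand-ins for the kinds of bodies different upstreams return:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copy of GeneralErrorResponse, keeping only a few of its fields.
type GeneralErrorResponse struct {
    Message  string `json:"message"`
    Msg      string `json:"msg"`
    ErrorMsg string `json:"error_msg"`
    Header   struct {
        Message string `json:"message"`
    } `json:"header"`
}

func (e GeneralErrorResponse) ToMessage() string {
    // First non-empty field wins, in the same relative order as above.
    for _, m := range []string{e.Message, e.Msg, e.ErrorMsg, e.Header.Message} {
        if m != "" {
            return m
        }
    }
    return ""
}

func main() {
    // Hypothetical upstream bodies: each provider puts its error text somewhere else.
    samples := []string{
        `{"message": "invalid api key"}`,
        `{"error_msg": "quota exceeded"}`,
        `{"header": {"message": "rate limited"}}`,
    }
    for _, s := range samples {
        var e GeneralErrorResponse
        _ = json.Unmarshal([]byte(s), &e)
        fmt.Println(e.ToMessage())
    }
}
```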

View File

@@ -311,7 +311,7 @@ type ChatCompletionsStreamResponseChoice struct {
    Delta struct {
        Content string `json:"content"`
    } `json:"delta"`
-   FinishReason *string `json:"finish_reason"`
+   FinishReason *string `json:"finish_reason,omitempty"`
}

type ChatCompletionsStreamResponse struct {
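For context on that one-flag change: on a *string field, omitempty drops the key entirely while the pointer is nil, so intermediate stream chunks no longer carry "finish_reason": null. A minimal sketch:

```go
package main

import (
    "encoding/json"
    "fmt"
)

type choiceOld struct {
    FinishReason *string `json:"finish_reason"`
}
type choiceNew struct {
    FinishReason *string `json:"finish_reason,omitempty"`
}

func main() {
    a, _ := json.Marshal(choiceOld{})
    b, _ := json.Marshal(choiceNew{})
    fmt.Println(string(a)) // {"finish_reason":null}
    fmt.Println(string(b)) // {}
    stop := "stop"
    c, _ := json.Marshal(choiceNew{FinishReason: &stop})
    fmt.Println(string(c)) // {"finish_reason":"stop"}
}
```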

View File

@@ -16,7 +16,7 @@ func TestGeneralOpenAIRequest_TextMessages(t *testing.T) {
    }{
        {
            name: "Test with []any messages",
-           messages: []Message{Message{}, Message{}},
+           messages: []Message{{}, {}},
            want: []Message{{}, {}},
            wantErr: nil,
        },

View File

@@ -87,6 +87,8 @@ func Distribute() func(c *gin.Context) {
c.Set("api_version", channel.Other) c.Set("api_version", channel.Other)
case common.ChannelTypeXunfei: case common.ChannelTypeXunfei:
c.Set("api_version", channel.Other) c.Set("api_version", channel.Other)
case common.ChannelTypeGemini:
c.Set("api_version", channel.Other)
        case common.ChannelTypeAIProxyLibrary:
            c.Set("library_id", channel.Other)
        case common.ChannelTypeAli:

View File

@@ -1,16 +1,16 @@
package model

import (
-   "one-api/common"
-   "os"
-   "strings"
-   "time"
+   "fmt"
    "github.com/Laisky/errors/v2"
    "gorm.io/driver/mysql"
    "gorm.io/driver/postgres"
    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
+   "one-api/common"
+   "os"
+   "strings"
+   "time"
)

var DB *gorm.DB
@@ -61,7 +61,8 @@ func chooseDB() (*gorm.DB, error) {
        // Use SQLite
        common.SysLog("SQL_DSN not set, using SQLite as database")
        common.UsingSQLite = true
-       return gorm.Open(sqlite.Open(common.SQLitePath), &gorm.Config{
+       config := fmt.Sprintf("?_busy_timeout=%d", common.SQLiteBusyTimeout)
+       return gorm.Open(sqlite.Open(common.SQLitePath+config), &gorm.Config{
            PrepareStmt: true, // precompile SQL
        })
    }
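The net effect is that the SQLite DSN gains a _busy_timeout query parameter, so concurrent access waits up to the configured number of milliseconds instead of failing immediately with "database is locked". A small sketch of the string being built, using the defaults from this diff:

```go
package main

import "fmt"

func main() {
    sqlitePath := "one-api.db" // common.SQLitePath
    busyTimeout := 3000        // common.SQLiteBusyTimeout (SQLITE_BUSY_TIMEOUT, default 3000 ms)
    config := fmt.Sprintf("?_busy_timeout=%d", busyTimeout)
    fmt.Println(sqlitePath + config) // one-api.db?_busy_timeout=3000
}
```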

View File

@@ -3,6 +3,7 @@ export const CHANNEL_OPTIONS = [
  { key: 14, text: 'Anthropic Claude', value: 14, color: 'black' },
  { key: 3, text: 'Azure OpenAI', value: 3, color: 'olive' },
  { key: 11, text: 'Google PaLM2', value: 11, color: 'orange' },
{ key: 24, text: 'Google Gemini', value: 24, color: 'orange' },
  { key: 15, text: '百度文心千帆', value: 15, color: 'blue' },
  { key: 17, text: '阿里通义千问', value: 17, color: 'orange' },
  { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' },

View File

@@ -69,7 +69,7 @@ const EditChannel = () => {
        localModels = ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'];
        break;
      case 17:
-       localModels = ['qwen-turbo', 'qwen-plus', 'text-embedding-v1'];
+       localModels = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-longcontext', 'text-embedding-v1'];
        break;
      case 16:
        localModels = ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];
@@ -83,6 +83,9 @@ const EditChannel = () => {
      case 23:
        localModels = ['hunyuan'];
        break;
case 24:
localModels = ['gemini-pro'];
break;
    }
    setInputs((inputs) => ({ ...inputs, models: localModels }));
} }