diff --git a/README.md b/README.md
index f27a6794..5f9947b0 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,6 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
    + [x] [together.ai](https://www.together.ai/)
    + [x] [novita.ai](https://www.novita.ai/)
    + [x] [硅基流动 SiliconCloud](https://siliconflow.cn/siliconcloud)
-   + [x] [xAI](https://x.ai/)
 2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
 3. 支持通过**负载均衡**的方式访问多个渠道。
 4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
diff --git a/common/ctxkey/key.go b/common/ctxkey/key.go
index 90556b3a..ba8c4595 100644
--- a/common/ctxkey/key.go
+++ b/common/ctxkey/key.go
@@ -14,6 +14,7 @@ const (
 	OriginalModel  = "original_model"
 	Group          = "group"
 	ModelMapping   = "model_mapping"
+	ParamsOverride = "params_override"
 	ChannelName    = "channel_name"
 	TokenId        = "token_id"
 	TokenName      = "token_name"
diff --git a/controller/channel-test.go b/controller/channel-test.go
index 971f5382..f8327284 100644
--- a/controller/channel-test.go
+++ b/controller/channel-test.go
@@ -76,9 +76,9 @@ func testChannel(channel *model.Channel, request *relaymodel.GeneralOpenAIReques
 		if len(modelNames) > 0 {
 			modelName = modelNames[0]
 		}
-	}
-	if modelMap != nil && modelMap[modelName] != "" {
-		modelName = modelMap[modelName]
+		if modelMap != nil && modelMap[modelName] != "" {
+			modelName = modelMap[modelName]
+		}
 	}
 	meta.OriginModelName, meta.ActualModelName = request.Model, modelName
 	request.Model = modelName
diff --git a/middleware/distributor.go b/middleware/distributor.go
index e2f75110..a2d8351f 100644
--- a/middleware/distributor.go
+++ b/middleware/distributor.go
@@ -62,6 +62,7 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
 	c.Set(ctxkey.ChannelId, channel.Id)
 	c.Set(ctxkey.ChannelName, channel.Name)
 	c.Set(ctxkey.ModelMapping, channel.GetModelMapping())
+	c.Set(ctxkey.ParamsOverride, channel.GetParamsOverride())
 	c.Set(ctxkey.OriginalModel, modelName) // for retry
 	c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
 	c.Set(ctxkey.BaseURL, channel.GetBaseURL())
diff --git a/model/channel.go b/model/channel.go
index 759dfd4f..f9e322a5 100644
--- a/model/channel.go
+++ b/model/channel.go
@@ -35,6 +35,7 @@ type Channel struct {
 	Group          string  `json:"group" gorm:"type:varchar(32);default:'default'"`
 	UsedQuota      int64   `json:"used_quota" gorm:"bigint;default:0"`
 	ModelMapping   *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
+	ParamsOverride *string `json:"default_params_override" gorm:"type:text;default:''"`
 	Priority       *int64  `json:"priority" gorm:"bigint;default:0"`
 	Config         string  `json:"config"`
 }
@@ -123,6 +124,20 @@ func (channel *Channel) GetModelMapping() map[string]string {
 	return modelMapping
 }
 
+func (channel *Channel) GetParamsOverride() map[string]map[string]interface{} {
+	if channel.ParamsOverride == nil || *channel.ParamsOverride == "" || *channel.ParamsOverride == "{}" {
+		return nil
+	}
+	paramsOverride := make(map[string]map[string]interface{})
+	err := json.Unmarshal([]byte(*channel.ParamsOverride), &paramsOverride)
+	if err != nil {
+		logger.SysError(fmt.Sprintf("failed to unmarshal params override for channel %d, error: %s", channel.Id, err.Error()))
+		return nil
+	}
+	return paramsOverride
+}
+
+
 func (channel *Channel) Insert() error {
 	var err error
 	err = DB.Create(channel).Error
diff --git a/one-api b/one-api
deleted file mode 100755
index 4c9190bb..00000000
Binary files a/one-api and /dev/null differ
diff --git a/relay/adaptor/anthropic/constants.go b/relay/adaptor/anthropic/constants.go
index 39326443..143d1efc 100644
--- a/relay/adaptor/anthropic/constants.go
+++ b/relay/adaptor/anthropic/constants.go
@@ -3,9 +3,7 @@ package anthropic
 var ModelList = []string{
 	"claude-instant-1.2", "claude-2.0", "claude-2.1",
 	"claude-3-haiku-20240307",
-	"claude-3-5-haiku-20241022",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
 	"claude-3-5-sonnet-20240620",
-	"claude-3-5-sonnet-20241022",
 }
diff --git a/relay/adaptor/aws/claude/main.go b/relay/adaptor/aws/claude/main.go
index 20de7038..7142e46f 100644
--- a/relay/adaptor/aws/claude/main.go
+++ b/relay/adaptor/aws/claude/main.go
@@ -31,10 +31,8 @@ var AwsModelIDMap = map[string]string{
 	"claude-2.1":                 "anthropic.claude-v2:1",
 	"claude-3-sonnet-20240229":   "anthropic.claude-3-sonnet-20240229-v1:0",
 	"claude-3-5-sonnet-20240620": "anthropic.claude-3-5-sonnet-20240620-v1:0",
-	"claude-3-5-sonnet-20241022": "anthropic.claude-3-5-sonnet-20241022-v2:0",
 	"claude-3-opus-20240229":     "anthropic.claude-3-opus-20240229-v1:0",
 	"claude-3-haiku-20240307":    "anthropic.claude-3-haiku-20240307-v1:0",
-	"claude-3-5-haiku-20241022":  "anthropic.claude-3-5-haiku-20241022-v1:0",
 }
 
 func awsModelID(requestModel string) (string, error) {
diff --git a/relay/adaptor/gemini/main.go b/relay/adaptor/gemini/main.go
index d6ab45d4..51fd6aa8 100644
--- a/relay/adaptor/gemini/main.go
+++ b/relay/adaptor/gemini/main.go
@@ -4,12 +4,11 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"
 	"strings"
 
-	"github.com/songquanpeng/one-api/common/render"
-
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -29,11 +28,6 @@ const (
 	VisionMaxImageNum = 16
 )
 
-var mimeTypeMap = map[string]string{
-	"json_object": "application/json",
-	"text":        "text/plain",
-}
-
 // Setting safety to the lowest possible values since Gemini is already powerless enough
 func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 	geminiRequest := ChatRequest{
@@ -62,15 +56,6 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 			MaxOutputTokens: textRequest.MaxTokens,
 		},
 	}
-	if textRequest.ResponseFormat != nil {
-		if mimeType, ok := mimeTypeMap[textRequest.ResponseFormat.Type]; ok {
-			geminiRequest.GenerationConfig.ResponseMimeType = mimeType
-		}
-		if textRequest.ResponseFormat.JsonSchema != nil {
-			geminiRequest.GenerationConfig.ResponseSchema = textRequest.ResponseFormat.JsonSchema.Schema
-			geminiRequest.GenerationConfig.ResponseMimeType = mimeTypeMap["json_object"]
-		}
-	}
 	if textRequest.Tools != nil {
 		functions := make([]model.Function, 0, len(textRequest.Tools))
 		for _, tool := range textRequest.Tools {
diff --git a/relay/adaptor/gemini/model.go b/relay/adaptor/gemini/model.go
index f6a3b250..f7179ea4 100644
--- a/relay/adaptor/gemini/model.go
+++ b/relay/adaptor/gemini/model.go
@@ -65,12 +65,10 @@ type ChatTools struct {
 }
 
 type ChatGenerationConfig struct {
-	ResponseMimeType string   `json:"responseMimeType,omitempty"`
-	ResponseSchema   any      `json:"responseSchema,omitempty"`
-	Temperature      float64  `json:"temperature,omitempty"`
-	TopP             float64  `json:"topP,omitempty"`
-	TopK             float64  `json:"topK,omitempty"`
-	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
-	CandidateCount   int      `json:"candidateCount,omitempty"`
-	StopSequences    []string `json:"stopSequences,omitempty"`
+	Temperature     float64  `json:"temperature,omitempty"`
+	TopP            float64  `json:"topP,omitempty"`
+	TopK            float64  `json:"topK,omitempty"`
+	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
+	CandidateCount  int      `json:"candidateCount,omitempty"`
+	StopSequences   []string `json:"stopSequences,omitempty"`
 }
diff --git a/relay/adaptor/groq/constants.go b/relay/adaptor/groq/constants.go
index 9c19df39..559851ee 100644
--- a/relay/adaptor/groq/constants.go
+++ b/relay/adaptor/groq/constants.go
@@ -4,21 +4,14 @@ package groq
 
 var ModelList = []string{
 	"gemma-7b-it",
+	"mixtral-8x7b-32768",
+	"llama3-8b-8192",
+	"llama3-70b-8192",
 	"gemma2-9b-it",
+	"llama-3.1-405b-reasoning",
 	"llama-3.1-70b-versatile",
 	"llama-3.1-8b-instant",
-	"llama-3.2-11b-text-preview",
-	"llama-3.2-11b-vision-preview",
-	"llama-3.2-1b-preview",
-	"llama-3.2-3b-preview",
-	"llama-3.2-90b-text-preview",
-	"llama-guard-3-8b",
-	"llama3-70b-8192",
-	"llama3-8b-8192",
 	"llama3-groq-70b-8192-tool-use-preview",
 	"llama3-groq-8b-8192-tool-use-preview",
-	"llava-v1.5-7b-4096-preview",
-	"mixtral-8x7b-32768",
-	"distil-whisper-large-v3-en",
 	"whisper-large-v3",
 }
diff --git a/relay/adaptor/openai/adaptor.go b/relay/adaptor/openai/adaptor.go
index 6946e402..5dc395ad 100644
--- a/relay/adaptor/openai/adaptor.go
+++ b/relay/adaptor/openai/adaptor.go
@@ -75,13 +75,6 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
-	if request.Stream {
-		// always return usage in stream mode
-		if request.StreamOptions == nil {
-			request.StreamOptions = &model.StreamOptions{}
-		}
-		request.StreamOptions.IncludeUsage = true
-	}
 	return request, nil
 }
 
diff --git a/relay/adaptor/openai/compatible.go b/relay/adaptor/openai/compatible.go
index 15b4dcc0..0512f05c 100644
--- a/relay/adaptor/openai/compatible.go
+++ b/relay/adaptor/openai/compatible.go
@@ -11,10 +11,9 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
-	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
-	"github.com/songquanpeng/one-api/relay/adaptor/xai"
+	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )
@@ -33,7 +32,6 @@ var CompatibleChannels = []int{
 	channeltype.TogetherAI,
 	channeltype.Novita,
 	channeltype.SiliconFlow,
-	channeltype.XAI,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -66,8 +64,6 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "novita", novita.ModelList
 	case channeltype.SiliconFlow:
 		return "siliconflow", siliconflow.ModelList
-	case channeltype.XAI:
-		return "xai", xai.ModelList
 	default:
 		return "openai", ModelList
 	}
diff --git a/relay/adaptor/vertexai/gemini/adapter.go b/relay/adaptor/vertexai/gemini/adapter.go
index ceff1ed2..43e6cbcd 100644
--- a/relay/adaptor/vertexai/gemini/adapter.go
+++ b/relay/adaptor/vertexai/gemini/adapter.go
@@ -15,7 +15,7 @@ import (
 )
 
 var ModelList = []string{
-	"gemini-1.5-pro-001", "gemini-1.5-flash-001", "gemini-pro", "gemini-pro-vision", "gemini-1.5-pro-002", "gemini-1.5-flash-002",
+	"gemini-1.5-pro-001", "gemini-1.5-flash-001", "gemini-pro", "gemini-pro-vision",
 }
 
 type Adaptor struct {
diff --git a/relay/adaptor/xai/constants.go b/relay/adaptor/xai/constants.go
deleted file mode 100644
index 9082b999..00000000
--- a/relay/adaptor/xai/constants.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package xai
-
-var ModelList = []string{
-	"grok-beta",
-}
diff --git a/relay/adaptor/xunfei/constants.go b/relay/adaptor/xunfei/constants.go
index 5b82ac29..c2992c98 100644
--- a/relay/adaptor/xunfei/constants.go
+++ b/relay/adaptor/xunfei/constants.go
@@ -7,6 +7,5 @@ var ModelList = []string{
 	"SparkDesk-v3.1",
 	"SparkDesk-v3.1-128K",
 	"SparkDesk-v3.5",
-	"SparkDesk-v3.5-32K",
 	"SparkDesk-v4.0",
 }
diff --git a/relay/adaptor/xunfei/main.go b/relay/adaptor/xunfei/main.go
index 16b89bca..99c4d1f4 100644
--- a/relay/adaptor/xunfei/main.go
+++ b/relay/adaptor/xunfei/main.go
@@ -292,8 +292,6 @@ func apiVersion2domain(apiVersion string) string {
 		return "pro-128k"
 	case "v3.5":
 		return "generalv3.5"
-	case "v3.5-32K":
-		return "max-32k"
 	case "v4.0":
 		return "4.0Ultra"
 	}
@@ -305,10 +303,7 @@ func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (strin
 	domain := apiVersion2domain(apiVersion)
 	switch apiVersion {
 	case "v3.1-128K":
 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret)
 		break
-	case "v3.5-32K":
-		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret)
-		break
 	default:
 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
diff --git a/relay/billing/ratio/model.go b/relay/billing/ratio/model.go
index 1b58ec09..cf526875 100644
--- a/relay/billing/ratio/model.go
+++ b/relay/billing/ratio/model.go
@@ -79,10 +79,8 @@ var ModelRatio = map[string]float64{
 	"claude-2.0":                 8.0 / 1000 * USD,
 	"claude-2.1":                 8.0 / 1000 * USD,
 	"claude-3-haiku-20240307":    0.25 / 1000 * USD,
-	"claude-3-5-haiku-20241022":  1.0 / 1000 * USD,
 	"claude-3-sonnet-20240229":   3.0 / 1000 * USD,
 	"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD,
-	"claude-3-5-sonnet-20241022": 3.0 / 1000 * USD,
 	"claude-3-opus-20240229":     15.0 / 1000 * USD,
 	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
 	"ERNIE-4.0-8K": 0.120 * RMB,
@@ -132,7 +130,6 @@ var ModelRatio = map[string]float64{
 	"SparkDesk-v3.1":        1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.1-128K":   1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.5":        1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v3.5-32K":    1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v4.0":        1.2858, // ¥0.018 / 1k tokens
 	"360GPT_S2_V9":          0.8572, // ¥0.012 / 1k tokens
 	"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
@@ -164,21 +161,15 @@ var ModelRatio = map[string]float64{
 	"mistral-embed": 0.1 / 1000 * USD,
 	// https://wow.groq.com/#:~:text=inquiries%C2%A0here.-,Model,-Current%20Speed
 	"gemma-7b-it":                           0.07 / 1000000 * USD,
+	"mixtral-8x7b-32768":                    0.24 / 1000000 * USD,
+	"llama3-8b-8192":                        0.05 / 1000000 * USD,
+	"llama3-70b-8192":                       0.59 / 1000000 * USD,
 	"gemma2-9b-it":                          0.20 / 1000000 * USD,
+	"llama-3.1-405b-reasoning":              0.89 / 1000000 * USD,
 	"llama-3.1-70b-versatile":               0.59 / 1000000 * USD,
 	"llama-3.1-8b-instant":                  0.05 / 1000000 * USD,
-	"llama-3.2-11b-text-preview":            0.05 / 1000000 * USD,
-	"llama-3.2-11b-vision-preview":          0.05 / 1000000 * USD,
-	"llama-3.2-1b-preview":                  0.05 / 1000000 * USD,
-	"llama-3.2-3b-preview":                  0.05 / 1000000 * USD,
-	"llama-3.2-90b-text-preview":            0.59 / 1000000 * USD,
-	"llama-guard-3-8b":                      0.05 / 1000000 * USD,
-	"llama3-70b-8192":                       0.59 / 1000000 * USD,
-	"llama3-8b-8192":                        0.05 / 1000000 * USD,
 	"llama3-groq-70b-8192-tool-use-preview": 0.89 / 1000000 * USD,
 	"llama3-groq-8b-8192-tool-use-preview":  0.19 / 1000000 * USD,
-	"mixtral-8x7b-32768":                    0.24 / 1000000 * USD,
-
 	// https://platform.lingyiwanwu.com/docs#-计费单元
 	"yi-34b-chat-0205": 2.5 / 1000 * RMB,
 	"yi-34b-chat-200k": 12.0 / 1000 * RMB,
@@ -209,8 +200,6 @@ var ModelRatio = map[string]float64{
 	"deepl-zh": 25.0 / 1000 * USD,
 	"deepl-en": 25.0 / 1000 * USD,
 	"deepl-ja": 25.0 / 1000 * USD,
-	// https://console.x.ai/
-	"grok-beta": 5.0 / 1000 * USD,
 }
 
 var CompletionRatio = map[string]float64{
@@ -375,8 +364,6 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		return 3
 	case "command-r-plus":
 		return 5
-	case "grok-beta":
-		return 3
 	}
 	return 1
 }
diff --git a/relay/channeltype/define.go b/relay/channeltype/define.go
index 98316959..a261cff8 100644
--- a/relay/channeltype/define.go
+++ b/relay/channeltype/define.go
@@ -46,6 +46,5 @@ const (
 	VertextAI
 	Proxy
 	SiliconFlow
-	XAI
 	Dummy
 )
diff --git a/relay/channeltype/url.go b/relay/channeltype/url.go
index b8bd61f8..8727faea 100644
--- a/relay/channeltype/url.go
+++ b/relay/channeltype/url.go
@@ -45,8 +45,7 @@ var ChannelBaseURLs = []string{
 	"https://api.novita.ai/v3/openai", // 41
 	"",                                // 42
 	"",                                // 43
-	"https://api.siliconflow.cn",      // 44
-	"https://api.x.ai",                // 45
+	"https://api.siliconflow.cn",      // 44
 }
 
 func init() {
diff --git a/relay/controller/text.go b/relay/controller/text.go
index 52ee9949..4d74819f 100644
--- a/relay/controller/text.go
+++ b/relay/controller/text.go
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"io/ioutil"
+	"context"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common/logger"
@@ -23,13 +25,34 @@
 func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 	ctx := c.Request.Context()
 	meta := meta.GetByContext(c)
-	// get & validate textRequest
-	textRequest, err := getAndValidateTextRequest(c, meta.Mode)
-	if err != nil {
-		logger.Errorf(ctx, "getAndValidateTextRequest failed: %s", err.Error())
-		return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest)
-	}
-	meta.IsStream = textRequest.Stream
+
+	// Read the original request body
+	bodyBytes, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to read request body: %s", err.Error())
+		return openai.ErrorWrapper(err, "invalid_request_body", http.StatusBadRequest)
+	}
+
+	// Restore the request body for `getAndValidateTextRequest`
+	c.Request.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
+
+	// Call `getAndValidateTextRequest`
+	textRequest, err := getAndValidateTextRequest(c, meta.Mode)
+	if err != nil {
+		logger.Errorf(ctx, "getAndValidateTextRequest failed: %s", err.Error())
+		return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest)
+	}
+	meta.IsStream = textRequest.Stream
+
+	// Parse the request body into a map
+	var rawRequest map[string]interface{}
+	if err := json.Unmarshal(bodyBytes, &rawRequest); err != nil {
+		logger.Errorf(ctx, "Failed to parse request body into map: %s", err.Error())
+		return openai.ErrorWrapper(err, "invalid_json", http.StatusBadRequest)
+	}
+
+	// Apply parameter overrides
+	applyParameterOverrides(ctx, meta, textRequest, rawRequest)
 
 	// map model name
 	meta.OriginModelName = textRequest.Model
@@ -105,3 +128,70 @@ func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *model.GeneralO
 	requestBody = bytes.NewBuffer(jsonData)
 	return requestBody, nil
 }
+
+func applyParameterOverrides(ctx context.Context, meta *meta.Meta, textRequest *model.GeneralOpenAIRequest, rawRequest map[string]interface{}) {
+	if meta.ParamsOverride != nil {
+		modelName := meta.OriginModelName
+		if overrideParams, exists := meta.ParamsOverride[modelName]; exists {
+			logger.Infof(ctx, "Applying parameter overrides for model %s on channel %d", modelName, meta.ChannelId)
+			for key, value := range overrideParams {
+				if _, userSpecified := rawRequest[key]; !userSpecified {
+					// Apply the override since the user didn't specify this parameter
+					switch key {
+					case "temperature":
+						if v, ok := value.(float64); ok {
+							textRequest.Temperature = v
+						} else if v, ok := value.(int); ok {
+							textRequest.Temperature = float64(v)
+						}
+					case "max_tokens":
+						if v, ok := value.(float64); ok {
+							textRequest.MaxTokens = int(v)
+						} else if v, ok := value.(int); ok {
+							textRequest.MaxTokens = v
+						}
+					case "top_p":
+						if v, ok := value.(float64); ok {
+							textRequest.TopP = v
+						} else if v, ok := value.(int); ok {
+							textRequest.TopP = float64(v)
+						}
+					case "frequency_penalty":
+						if v, ok := value.(float64); ok {
+							textRequest.FrequencyPenalty = v
+						} else if v, ok := value.(int); ok {
+							textRequest.FrequencyPenalty = float64(v)
+						}
+					case "presence_penalty":
+						if v, ok := value.(float64); ok {
+							textRequest.PresencePenalty = v
+						} else if v, ok := value.(int); ok {
+							textRequest.PresencePenalty = float64(v)
+						}
+					case "stop":
+						textRequest.Stop = value
+					case "n":
+						if v, ok := value.(float64); ok {
+							textRequest.N = int(v)
+						} else if v, ok := value.(int); ok {
+							textRequest.N = v
+						}
+					case "stream":
+						if v, ok := value.(bool); ok {
+							textRequest.Stream = v
+						}
+					case "num_ctx":
+						if v, ok := value.(float64); ok {
+							textRequest.NumCtx = int(v)
+						} else if v, ok := value.(int); ok {
+							textRequest.NumCtx = v
+						}
+					// Handle other parameters as needed
+					default:
+						logger.Warnf(ctx, "Unknown parameter override key: %s", key)
+					}
+				}
+			}
+		}
+	}
+}
\ No newline at end of file
diff --git a/relay/meta/relay_meta.go b/relay/meta/relay_meta.go
index b1761e9a..e7e051c6 100644
--- a/relay/meta/relay_meta.go
+++ b/relay/meta/relay_meta.go
@@ -18,6 +18,7 @@ type Meta struct {
 	UserId         int
 	Group          string
 	ModelMapping   map[string]string
+	ParamsOverride map[string]map[string]interface{}
 	// BaseURL is the proxy url set in the channel config
 	BaseURL  string
 	APIKey   string
@@ -47,6 +48,11 @@ func GetByContext(c *gin.Context) *Meta {
 		APIKey:         strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
 		RequestURLPath: c.Request.URL.String(),
 	}
+	// Retrieve ParamsOverride
+	paramsOverride, exists := c.Get(ctxkey.ParamsOverride)
+	if exists && paramsOverride != nil {
+		meta.ParamsOverride = paramsOverride.(map[string]map[string]interface{})
+	}
 	cfg, ok := c.Get(ctxkey.Config)
 	if ok {
 		meta.Config = cfg.(model.ChannelConfig)
diff --git a/relay/model/constant.go b/relay/model/constant.go
index c9d6d645..f6cf1924 100644
--- a/relay/model/constant.go
+++ b/relay/model/constant.go
@@ -1,7 +1,6 @@
 package model
 
 const (
-	ContentTypeText       = "text"
-	ContentTypeImageURL   = "image_url"
-	ContentTypeInputAudio = "input_audio"
+	ContentTypeText     = "text"
+	ContentTypeImageURL = "image_url"
 )
diff --git a/relay/model/general.go b/relay/model/general.go
index fe73779e..aacc8467 100644
--- a/relay/model/general.go
+++ b/relay/model/general.go
@@ -12,20 +12,9 @@ type JSONSchema struct {
 	Strict      *bool          `json:"strict,omitempty"`
 }
 
-type Audio struct {
-	Voice  string `json:"voice,omitempty"`
-	Format string `json:"format,omitempty"`
-}
-
-type StreamOptions struct {
-	IncludeUsage bool `json:"include_usage,omitempty"`
-}
-
 type GeneralOpenAIRequest struct {
 	Messages            []Message       `json:"messages,omitempty"`
 	Model               string          `json:"model,omitempty"`
-	Modalities          []string        `json:"modalities,omitempty"`
-	Audio               *Audio          `json:"audio,omitempty"`
 	FrequencyPenalty    float64         `json:"frequency_penalty,omitempty"`
 	MaxTokens           int             `json:"max_tokens,omitempty"`
 	N                   int             `json:"n,omitempty"`
@@ -34,7 +23,6 @@ type GeneralOpenAIRequest struct {
 	Seed                float64         `json:"seed,omitempty"`
 	Stop                any             `json:"stop,omitempty"`
 	Stream              bool            `json:"stream,omitempty"`
-	StreamOptions       *StreamOptions  `json:"stream_options,omitempty"`
 	Temperature         float64         `json:"temperature,omitempty"`
 	TopP                float64         `json:"top_p,omitempty"`
 	TopK                int             `json:"top_k,omitempty"`
@@ -49,7 +37,7 @@ type GeneralOpenAIRequest struct {
 	Dimensions          int             `json:"dimensions,omitempty"`
 	Instruction         string          `json:"instruction,omitempty"`
 	Size                string          `json:"size,omitempty"`
-	NumCtx              int             `json:"num_ctx,omitempty"`
+	NumCtx              int            `json:"num_ctx,omitempty"`
 }
 
 func (r GeneralOpenAIRequest) ParseInput() []string {
diff --git a/web/air/src/components/TokensTable.js b/web/air/src/components/TokensTable.js
index 48836c85..c87657dc 100644
--- a/web/air/src/components/TokensTable.js
+++ b/web/air/src/components/TokensTable.js
@@ -395,7 +395,7 @@ const TokensTable = () => {
         url = mjLink + `/#/?settings={"key":"sk-${key}","url":"${serverAddress}"}`;
         break;
       case 'lobechat':
-        url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}/v1"}}}`;
+        url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}"}}}`;
         break;
       default:
         if (!chatLink) {
diff --git a/web/air/src/constants/channel.constants.js b/web/air/src/constants/channel.constants.js
index a7e984ec..04fe94f1 100644
--- a/web/air/src/constants/channel.constants.js
+++ b/web/air/src/constants/channel.constants.js
@@ -30,7 +30,6 @@ export const CHANNEL_OPTIONS = [
  { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
  { key: 43, text: 'Proxy', value: 43, color: 'blue' },
  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
- { key: 45, text: 'xAI', value: 45, color: 'blue' },
  { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
  { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
  { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
diff --git a/web/air/src/pages/Channel/EditChannel.js b/web/air/src/pages/Channel/EditChannel.js
index ffed94a0..b50a6e77 100644
--- a/web/air/src/pages/Channel/EditChannel.js
+++ b/web/air/src/pages/Channel/EditChannel.js
@@ -63,7 +63,7 @@ const EditChannel = (props) => {
     let localModels = [];
     switch (value) {
       case 14:
-        localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022"];
+        localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-sonnet-20240620"];
         break;
       case 11:
         localModels = ['PaLM-2'];
@@ -78,7 +78,7 @@ const EditChannel = (props) => {
         localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
         break;
       case 18:
-        localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v3.5-32K', 'SparkDesk-v4.0'];
+        localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v4.0'];
         break;
       case 19:
         localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];
diff --git a/web/berry/src/constants/ChannelConstants.js b/web/berry/src/constants/ChannelConstants.js
index 35398875..98ea7ca5 100644
--- a/web/berry/src/constants/ChannelConstants.js
+++ b/web/berry/src/constants/ChannelConstants.js
@@ -179,12 +179,6 @@ export const CHANNEL_OPTIONS = {
     value: 44,
     color: 'primary'
   },
-  45: {
-    key: 45,
-    text: 'xAI',
-    value: 45,
-    color: 'primary'
-  },
   41: {
     key: 41,
     text: 'Novita',
diff --git a/web/berry/src/views/Channel/type/Config.js b/web/berry/src/views/Channel/type/Config.js
index 32ffec32..4a8fc27a 100644
--- a/web/berry/src/views/Channel/type/Config.js
+++ b/web/berry/src/views/Channel/type/Config.js
@@ -91,7 +91,7 @@ const typeConfig = {
       other: '版本号'
     },
     input: {
-      models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v3.5-32K', 'SparkDesk-v4.0']
+      models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v4.0']
     },
     prompt: {
       key: '按照如下格式输入:APPID|APISecret|APIKey',
@@ -223,9 +223,6 @@ const typeConfig = {
     },
     modelGroup: 'anthropic'
   },
-  45: {
-    modelGroup: 'xai'
-  },
 };
 
 export { defaultConfig, typeConfig };
diff --git a/web/berry/src/views/Token/component/TableRow.js b/web/berry/src/views/Token/component/TableRow.js
index 4306be5e..0a7efd98 100644
--- a/web/berry/src/views/Token/component/TableRow.js
+++ b/web/berry/src/views/Token/component/TableRow.js
@@ -33,7 +33,7 @@ const COPY_OPTIONS = [
   },
   { key: 'ama', text: 'BotGem', url: 'ama://set-api-key?server={serverAddress}&key=sk-{key}', encode: true },
   { key: 'opencat', text: 'OpenCat', url: 'opencat://team/join?domain={serverAddress}&token=sk-{key}', encode: true },
-  { key: 'lobechat', text: 'LobeChat', url: 'https://lobehub.com/?settings={"keyVaults":{"openai":{"apiKey":"sk-{key}","baseURL":"{serverAddress}"}}}', encode: true }
+  { key: 'lobechat', text: 'LobeChat', url: 'https://lobehub.com/?settings={"keyVaults":{"openai":{"apiKey":"user-key","baseURL":"https://your-proxy.com/v1"}}}', encode: true }
 ];
 
 function replacePlaceholders(text, key, serverAddress) {
diff --git a/web/default/src/components/ChannelsTable.js b/web/default/src/components/ChannelsTable.js
index 6e0ec05d..48f476a7 100644
--- a/web/default/src/components/ChannelsTable.js
+++ b/web/default/src/components/ChannelsTable.js
@@ -59,12 +59,6 @@ function renderBalance(type, balance) {
   }
 }
 
-function isShowDetail() {
-  return localStorage.getItem("show_detail") === "true";
-}
-
-const promptID = "detail"
-
 const ChannelsTable = () => {
   const [channels, setChannels] = useState([]);
   const [loading, setLoading] = useState(true);
@@ -72,8 +66,7 @@ const ChannelsTable = () => {
   const [searchKeyword, setSearchKeyword] = useState('');
   const [searching, setSearching] = useState(false);
   const [updatingBalance, setUpdatingBalance] = useState(false);
-  const [showPrompt, setShowPrompt] = useState(shouldShowPrompt(promptID));
-  const [showDetail, setShowDetail] = useState(isShowDetail());
+  const [showPrompt, setShowPrompt] = useState(shouldShowPrompt("channel-test"));
 
   const loadChannels = async (startIdx) => {
     const res = await API.get(`/api/channel/?p=${startIdx}`);
@@ -127,11 +120,6 @@ const ChannelsTable = () => {
     await loadChannels(activePage - 1);
   };
 
-  const toggleShowDetail = () => {
-    setShowDetail(!showDetail);
-    localStorage.setItem("show_detail", (!showDetail).toString());
-  }
-
   useEffect(() => {
     loadChannels(0)
       .then()
@@ -376,13 +364,11 @@ const ChannelsTable = () => {
         showPrompt && (
           <Message onDismiss={() => {
             setShowPrompt(false);
-            setPromptShown(promptID);
+            setPromptShown("channel-test");
           }}>
             OpenAI 渠道已经不再支持通过 key 获取余额,因此余额显示为 0。对于支持的渠道类型,请点击余额进行刷新。
             <br/>
             渠道测试仅支持 chat 模型,优先使用 gpt-3.5-turbo,如果该模型不可用则使用你所配置的模型列表中的第一个模型。
-            <br/>
-            点击下方详情按钮可以显示余额以及设置额外的测试模型。
           </Message>
         )
       }
@@ -442,7 +428,6 @@ const ChannelsTable = () => {
                 onClick={() => {
                   sortChannel('balance');
                 }}
-                hidden={!showDetail}
               >
                 余额
               </Table.HeaderCell>
@@ -454,7 +439,7 @@ const ChannelsTable = () => {
               >
                 优先级
               </Table.HeaderCell>
-              <Table.HeaderCell hidden={!showDetail}>
+              <Table.HeaderCell>
                 测试模型
               </Table.HeaderCell>
               <Table.HeaderCell>操作</Table.HeaderCell>
@@ -482,7 +467,7 @@ const ChannelsTable = () => {
                     basic
                   />
-