Compare commits

...

17 Commits

Author    SHA1        Message    Date
JustSong  b747cdbc6f  fix: fix getAndValidateTextRequest failed: unexpected end of JSON input (close #1043)  2024-02-26 22:52:16 +08:00
JustSong  6b27d6659a  fix: add role for ChatCompletionsStreamResponseChoice.Delta  2024-02-25 19:49:22 +08:00
JustSong  dc5b781191  fix: fix stream response id  2024-02-25 19:47:59 +08:00
JustSong  c880b4a9a3  fix: fix missing index in ChatCompletionsStreamResponseChoice (#1037)  2024-02-25 19:17:37 +08:00
JustSong  565ea58e68  feat: built in retry supported (close #1036, close #770)  2024-02-25 19:01:49 +08:00
JustSong  f141a37a9e  fix: fix "error update user quota cache: Error 1040: Too many connections"  2024-02-25 16:58:14 +08:00
JustSong  5b78886ad3  fix: fix i18n  2024-02-25 16:53:46 +08:00
JustSong  87c7c4f0e6  fix: rm history build before building  2024-02-25 02:07:34 +08:00
JustSong  4c4a873890  fix: add an ending line for THEMES  2024-02-25 01:59:40 +08:00
JustSong  0664bdfda1  fix: fix build.sh (close #1026)  2024-02-25 01:53:27 +08:00
JustSong  32387d9c20  fix: fix version is blank  2024-02-21 22:21:01 +08:00
JustSong  bd888f2eb7  fix: fix prompt token is zero (close #1023)  2024-02-21 22:19:42 +08:00
JustSong  cece77e533  fix: fix model list  2024-02-19 22:20:18 +08:00
JustSong  2a5468e23c  refactor: remove useless button (close #1014)  2024-02-18 22:21:37 +08:00
JustSong  d0e415893b  fix: fix SparkDesk model name  2024-02-18 17:16:11 +08:00
JustSong  6cf5ce9a7a  fix: fix SparkDesk model name  2024-02-18 17:11:16 +08:00
JustSong  f598b9df87  feat: add new SparkDesk models  2024-02-18 17:02:36 +08:00
25 changed files with 226 additions and 86 deletions

View File

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |

View File

@@ -23,7 +23,7 @@ jobs:
      - uses: actions/setup-node@v3
        with:
          node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
        env:
          CI: ""
        run: |

View File

@@ -26,7 +26,7 @@ jobs:
      - uses: actions/setup-node@v3
        with:
          node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
        env:
          CI: ""
        run: |

View File

@@ -23,7 +23,7 @@ ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
 COPY --from=builder /web/build ./web/build
 
-RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
+RUN go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
 
 FROM alpine
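Note: the updated -X flag only takes effect because a package-level Version variable exists at that exact import path after the module rename. A minimal, hypothetical sketch of such a variable (the actual file name and default value in the repo may differ):

// Hypothetical sketch of the variable that
// -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=...'" overrides at build time.
package common

var Version = "v0.0.0"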

View File

@@ -8,12 +8,24 @@ import (
     "strings"
 )
 
-func UnmarshalBodyReusable(c *gin.Context, v any) error {
+const KeyRequestBody = "key_request_body"
+
+func GetRequestBody(c *gin.Context) ([]byte, error) {
+    requestBody, _ := c.Get(KeyRequestBody)
+    if requestBody != nil {
+        return requestBody.([]byte), nil
+    }
     requestBody, err := io.ReadAll(c.Request.Body)
     if err != nil {
-        return err
+        return nil, err
     }
-    err = c.Request.Body.Close()
+    _ = c.Request.Body.Close()
+    c.Set(KeyRequestBody, requestBody)
+    return requestBody.([]byte), nil
+}
+
+func UnmarshalBodyReusable(c *gin.Context, v any) error {
+    requestBody, err := GetRequestBody(c)
     if err != nil {
         return err
     }
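Note: a minimal sketch of how a caller can reuse the cached body to replay the same request (hypothetical helper; the retry loop in controller/relay.go below does exactly this):

package example // hypothetical, for illustration only

import (
    "bytes"
    "io"

    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common"
)

// replayBody rebuilds c.Request.Body from the cached bytes so the request
// can be read again, e.g. before retrying on another channel.
func replayBody(c *gin.Context) error {
    requestBody, err := common.GetRequestBody(c)
    if err != nil {
        return err
    }
    c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
    return nil
}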

View File

@@ -137,6 +137,7 @@ func GetUUID() string {
 }
 
 const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const keyNumbers = "0123456789"
 
 func init() {
     rand.Seed(time.Now().UnixNano())
@@ -168,6 +169,15 @@ func GetRandomString(length int) string {
     return string(key)
 }
 
+func GetRandomNumberString(length int) string {
+    rand.Seed(time.Now().UnixNano())
+    key := make([]byte, length)
+    for i := 0; i < length; i++ {
+        key[i] = keyNumbers[rand.Intn(len(keyNumbers))]
+    }
+    return string(key)
+}
+
 func GetTimestamp() int64 {
     return time.Now().Unix()
 }

View File

@@ -112,6 +112,10 @@ var ModelRatio = map[string]float64{
     "qwen-max-longcontext": 1.4286, // ¥0.02 / 1k tokens
     "text-embedding-v1":    0.05,   // ¥0.0007 / 1k tokens
     "SparkDesk":            1.2858, // ¥0.018 / 1k tokens
+    "SparkDesk-v1.1":       1.2858, // ¥0.018 / 1k tokens
+    "SparkDesk-v2.1":       1.2858, // ¥0.018 / 1k tokens
+    "SparkDesk-v3.1":       1.2858, // ¥0.018 / 1k tokens
+    "SparkDesk-v3.5":       1.2858, // ¥0.018 / 1k tokens
     "360GPT_S2_V9":          0.8572, // ¥0.012 / 1k tokens
     "embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
     "embedding_s1_v1":       0.0715, // ¥0.001 / 1k tokens

View File

@@ -58,6 +58,9 @@ func init() {
     })
     // https://platform.openai.com/docs/models/model-endpoint-compatibility
     for i := 0; i < constant.APITypeDummy; i++ {
+        if i == constant.APITypeAIProxyLibrary {
+            continue
+        }
         adaptor := helper.GetAdaptor(i)
         channelName := adaptor.GetChannelName()
         modelNames := adaptor.GetModelList()

View File

@@ -1,23 +1,27 @@
 package controller
 
 import (
+    "bytes"
+    "context"
     "fmt"
     "github.com/gin-gonic/gin"
+    "github.com/songquanpeng/one-api/common"
     "github.com/songquanpeng/one-api/common/config"
     "github.com/songquanpeng/one-api/common/helper"
     "github.com/songquanpeng/one-api/common/logger"
+    "github.com/songquanpeng/one-api/middleware"
+    dbmodel "github.com/songquanpeng/one-api/model"
     "github.com/songquanpeng/one-api/relay/constant"
     "github.com/songquanpeng/one-api/relay/controller"
     "github.com/songquanpeng/one-api/relay/model"
     "github.com/songquanpeng/one-api/relay/util"
+    "io"
     "net/http"
-    "strconv"
 )
 
 // https://platform.openai.com/docs/api-reference/chat
-func Relay(c *gin.Context) {
-    relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
     var err *model.ErrorWithStatusCode
     switch relayMode {
     case constant.RelayModeImagesGenerations:
@@ -31,32 +35,85 @@
     default:
         err = controller.RelayTextHelper(c)
     }
-    if err != nil {
-        requestId := c.GetString(logger.RequestIdKey)
-        retryTimesStr := c.Query("retry")
-        retryTimes, _ := strconv.Atoi(retryTimesStr)
-        if retryTimesStr == "" {
-            retryTimes = config.RetryTimes
-        }
-        if retryTimes > 0 {
-            c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
-        } else {
-            if err.StatusCode == http.StatusTooManyRequests {
-                err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
-            }
-            err.Error.Message = helper.MessageWithRequestId(err.Error.Message, requestId)
-            c.JSON(err.StatusCode, gin.H{
-                "error": err.Error,
-            })
-        }
-        channelId := c.GetInt("channel_id")
-        logger.Error(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
-        // https://platform.openai.com/docs/guides/error-codes/api-errors
-        if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
-            channelId := c.GetInt("channel_id")
-            channelName := c.GetString("channel_name")
-            disableChannel(channelId, channelName, err.Message)
-        }
-    }
-}
+    return err
+}
+
+func Relay(c *gin.Context) {
+    ctx := c.Request.Context()
+    relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+    bizErr := relay(c, relayMode)
+    if bizErr == nil {
+        return
+    }
+    channelId := c.GetInt("channel_id")
+    lastFailedChannelId := channelId
+    channelName := c.GetString("channel_name")
+    group := c.GetString("group")
+    originalModel := c.GetString("original_model")
+    go processChannelRelayError(ctx, channelId, channelName, bizErr)
+    requestId := c.GetString(logger.RequestIdKey)
+    retryTimes := config.RetryTimes
+    if !shouldRetry(c, bizErr.StatusCode) {
+        logger.Errorf(ctx, "relay error happen, status code is %d, won't retry in this case", bizErr.StatusCode)
+        retryTimes = 0
+    }
+    for i := retryTimes; i > 0; i-- {
+        channel, err := dbmodel.CacheGetRandomSatisfiedChannel(group, originalModel)
+        if err != nil {
+            logger.Errorf(ctx, "CacheGetRandomSatisfiedChannel failed: %w", err)
+            break
+        }
+        logger.Infof(ctx, "using channel #%d to retry (remain times %d)", channel.Id, i)
+        if channel.Id == lastFailedChannelId {
+            continue
+        }
+        middleware.SetupContextForSelectedChannel(c, channel, originalModel)
+        requestBody, err := common.GetRequestBody(c)
+        c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
+        bizErr = relay(c, relayMode)
+        if bizErr == nil {
+            return
+        }
+        channelId := c.GetInt("channel_id")
+        lastFailedChannelId = channelId
+        channelName := c.GetString("channel_name")
+        go processChannelRelayError(ctx, channelId, channelName, bizErr)
+    }
+    if bizErr != nil {
+        if bizErr.StatusCode == http.StatusTooManyRequests {
+            bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
+        }
+        bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
+        c.JSON(bizErr.StatusCode, gin.H{
+            "error": bizErr.Error,
+        })
+    }
+}
+
+func shouldRetry(c *gin.Context, statusCode int) bool {
+    if _, ok := c.Get("specific_channel_id"); ok {
+        return false
+    }
+    if statusCode == http.StatusTooManyRequests {
+        return true
+    }
+    if statusCode/100 == 5 {
+        return true
+    }
+    if statusCode == http.StatusBadRequest {
+        return false
+    }
+    if statusCode/100 == 2 {
+        return false
+    }
+    return true
+}
+
+func processChannelRelayError(ctx context.Context, channelId int, channelName string, err *model.ErrorWithStatusCode) {
+    logger.Errorf(ctx, "relay error (channel #%d): %s", channelId, err.Message)
+    // https://platform.openai.com/docs/guides/error-codes/api-errors
+    if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
+        disableChannel(channelId, channelName, err.Message)
+    }
+}
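Note: a hypothetical test (not part of this change) illustrating the retry policy that the new shouldRetry encodes: 429 and 5xx responses are retried, 2xx and 400 are not, and requests pinned to a specific channel are never retried.

package controller

import (
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/gin-gonic/gin"
)

func TestShouldRetry(t *testing.T) {
    c, _ := gin.CreateTestContext(httptest.NewRecorder())
    for code, want := range map[int]bool{
        http.StatusTooManyRequests: true,  // 429: retried
        http.StatusBadGateway:      true,  // 5xx: retried
        http.StatusBadRequest:      false, // 400: not retried
        http.StatusOK:              false, // 2xx: not retried
    } {
        if got := shouldRetry(c, code); got != want {
            t.Errorf("shouldRetry(%d) = %v, want %v", code, got, want)
        }
    }
    // A request pinned to a specific channel is never retried.
    c.Set("specific_channel_id", "42")
    if shouldRetry(c, http.StatusTooManyRequests) {
        t.Error("expected no retry when specific_channel_id is set")
    }
}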

View File

@@ -456,6 +456,7 @@
   "已绑定的邮箱账户": "Email Account Bound",
   "用户信息更新成功!": "User information updated successfully!",
   "模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f",
+  "模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f": "model rate %.2f, group rate %.2f, completion rate %.2f",
   "使用明细(总消耗额度:{renderQuota(stat.quota)}": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
   "用户名称": "User Name",
   "令牌名称": "Token Name",

View File

@@ -108,7 +108,7 @@ func TokenAuth() func(c *gin.Context) {
         c.Set("token_name", token.Name)
         if len(parts) > 1 {
             if model.IsAdmin(token.UserId) {
-                c.Set("channelId", parts[1])
+                c.Set("specific_channel_id", parts[1])
             } else {
                 abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道")
                 return

View File

@@ -21,8 +21,9 @@ func Distribute() func(c *gin.Context) {
         userId := c.GetInt("id")
         userGroup, _ := model.CacheGetUserGroup(userId)
         c.Set("group", userGroup)
+        var requestModel string
         var channel *model.Channel
-        channelId, ok := c.Get("channelId")
+        channelId, ok := c.Get("specific_channel_id")
         if ok {
             id, err := strconv.Atoi(channelId.(string))
             if err != nil {
@@ -66,6 +67,7 @@ func Distribute() func(c *gin.Context) {
                     modelRequest.Model = "whisper-1"
                 }
             }
+            requestModel = modelRequest.Model
             channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
             if err != nil {
                 message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)
@@ -77,29 +79,34 @@ func Distribute() func(c *gin.Context) {
                 return
             }
         }
-        c.Set("channel", channel.Type)
-        c.Set("channel_id", channel.Id)
-        c.Set("channel_name", channel.Name)
-        c.Set("model_mapping", channel.GetModelMapping())
-        c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
-        c.Set("base_url", channel.GetBaseURL())
-        // this is for backward compatibility
-        switch channel.Type {
-        case common.ChannelTypeAzure:
-            c.Set(common.ConfigKeyAPIVersion, channel.Other)
-        case common.ChannelTypeXunfei:
-            c.Set(common.ConfigKeyAPIVersion, channel.Other)
-        case common.ChannelTypeGemini:
-            c.Set(common.ConfigKeyAPIVersion, channel.Other)
-        case common.ChannelTypeAIProxyLibrary:
-            c.Set(common.ConfigKeyLibraryID, channel.Other)
-        case common.ChannelTypeAli:
-            c.Set(common.ConfigKeyPlugin, channel.Other)
-        }
-        cfg, _ := channel.LoadConfig()
-        for k, v := range cfg {
-            c.Set(common.ConfigKeyPrefix+k, v)
-        }
+        SetupContextForSelectedChannel(c, channel, requestModel)
         c.Next()
     }
 }
+
+func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
+    c.Set("channel", channel.Type)
+    c.Set("channel_id", channel.Id)
+    c.Set("channel_name", channel.Name)
+    c.Set("model_mapping", channel.GetModelMapping())
+    c.Set("original_model", modelName) // for retry
+    c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
+    c.Set("base_url", channel.GetBaseURL())
+    // this is for backward compatibility
+    switch channel.Type {
+    case common.ChannelTypeAzure:
+        c.Set(common.ConfigKeyAPIVersion, channel.Other)
+    case common.ChannelTypeXunfei:
+        c.Set(common.ConfigKeyAPIVersion, channel.Other)
+    case common.ChannelTypeGemini:
+        c.Set(common.ConfigKeyAPIVersion, channel.Other)
+    case common.ChannelTypeAIProxyLibrary:
+        c.Set(common.ConfigKeyLibraryID, channel.Other)
+    case common.ChannelTypeAli:
+        c.Set(common.ConfigKeyPlugin, channel.Other)
+    }
+    cfg, _ := channel.LoadConfig()
+    for k, v := range cfg {
+        c.Set(common.ConfigKeyPrefix+k, v)
+    }
+}

View File

@@ -9,7 +9,7 @@ import (
 
 func RequestId() func(c *gin.Context) {
     return func(c *gin.Context) {
-        id := helper.GetTimeString() + helper.GetRandomString(8)
+        id := helper.GetTimeString() + helper.GetRandomNumberString(8)
         c.Set(logger.RequestIdKey, id)
         ctx := context.WithValue(c.Request.Context(), logger.RequestIdKey, id)
         c.Request = c.Request.WithContext(ctx)

View File

@@ -94,7 +94,7 @@ func CacheUpdateUserQuota(id int) error {
     if !common.RedisEnabled {
         return nil
     }
-    quota, err := GetUserQuota(id)
+    quota, err := CacheGetUserQuota(id)
     if err != nil {
         return err
     }

View File

@@ -53,7 +53,7 @@ func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextRespon
         FinishReason: "stop",
     }
     fullTextResponse := openai.TextResponse{
-        Id:      helper.GetUUID(),
+        Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
         Object:  "chat.completion",
         Created: helper.GetTimestamp(),
         Choices: []openai.TextResponseChoice{choice},
@@ -66,7 +66,7 @@ func documentsAIProxyLibrary(documents []LibraryDocument) *openai.ChatCompletion
     choice.Delta.Content = aiProxyDocuments2Markdown(documents)
     choice.FinishReason = &constant.StopFinishReason
     return &openai.ChatCompletionsStreamResponse{
-        Id:      helper.GetUUID(),
+        Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
         Object:  "chat.completion.chunk",
         Created: helper.GetTimestamp(),
         Model:   "",
@@ -78,7 +78,7 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
     var choice openai.ChatCompletionsStreamResponseChoice
     choice.Delta.Content = response.Content
     return &openai.ChatCompletionsStreamResponse{
-        Id:      helper.GetUUID(),
+        Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
        Object:  "chat.completion.chunk",
        Created: helper.GetTimestamp(),
        Model:   response.Model,

View File

@@ -118,8 +118,10 @@ type ImageResponse struct {
 }
 
 type ChatCompletionsStreamResponseChoice struct {
+    Index int `json:"index"`
     Delta struct {
         Content string `json:"content"`
+        Role    string `json:"role,omitempty"`
     } `json:"delta"`
     FinishReason *string `json:"finish_reason,omitempty"`
 }
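Note: with Index and Role added, each stream choice marshals to an OpenAI-style chunk choice; a standalone sketch (struct copied from the diff above, sample values made up):

package main

import (
    "encoding/json"
    "fmt"
)

// Local copy of ChatCompletionsStreamResponseChoice, only to show the wire format.
type streamChoice struct {
    Index int `json:"index"`
    Delta struct {
        Content string `json:"content"`
        Role    string `json:"role,omitempty"`
    } `json:"delta"`
    FinishReason *string `json:"finish_reason,omitempty"`
}

func main() {
    var c streamChoice
    c.Delta.Role = "assistant"
    c.Delta.Content = "Hello"
    b, _ := json.Marshal(c)
    fmt.Println(string(b))
    // {"index":0,"delta":{"content":"Hello","role":"assistant"}}
}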

View File

@@ -2,4 +2,8 @@ package xunfei
 
 var ModelList = []string{
     "SparkDesk",
+    "SparkDesk-v1.1",
+    "SparkDesk-v2.1",
+    "SparkDesk-v3.1",
+    "SparkDesk-v3.5",
 }

View File

@@ -70,6 +70,7 @@ func responseXunfei2OpenAI(response *ChatResponse) *openai.TextResponse {
         FinishReason: constant.StopFinishReason,
     }
     fullTextResponse := openai.TextResponse{
+        Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
         Object:  "chat.completion",
         Created: helper.GetTimestamp(),
         Choices: []openai.TextResponseChoice{choice},
@@ -92,6 +93,7 @@ func streamResponseXunfei2OpenAI(xunfeiResponse *ChatResponse) *openai.ChatCompl
         choice.FinishReason = &constant.StopFinishReason
     }
     response := openai.ChatCompletionsStreamResponse{
+        Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
         Object:  "chat.completion.chunk",
         Created: helper.GetTimestamp(),
         Model:   "SparkDesk",
@@ -127,7 +129,7 @@ func buildXunfeiAuthUrl(hostUrl string, apiKey, apiSecret string) string {
 }
 
 func StreamHandler(c *gin.Context, textRequest model.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*model.ErrorWithStatusCode, *model.Usage) {
-    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
+    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret, textRequest.Model)
     dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
     if err != nil {
         return openai.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
@@ -157,7 +159,7 @@ func StreamHandler(c *gin.Context, textRequest model.GeneralOpenAIRequest, appId
 }
 
 func Handler(c *gin.Context, textRequest model.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*model.ErrorWithStatusCode, *model.Usage) {
-    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
+    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret, textRequest.Model)
     dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
     if err != nil {
         return openai.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
@@ -242,20 +244,45 @@ func xunfeiMakeRequest(textRequest model.GeneralOpenAIRequest, domain, authUrl,
     return dataChan, stopChan, nil
 }
 
-func getXunfeiAuthUrl(c *gin.Context, apiKey string, apiSecret string) (string, string) {
+func getAPIVersion(c *gin.Context, modelName string) string {
     query := c.Request.URL.Query()
     apiVersion := query.Get("api-version")
-    if apiVersion == "" {
-        apiVersion = c.GetString(common.ConfigKeyAPIVersion)
+    if apiVersion != "" {
+        return apiVersion
     }
-    if apiVersion == "" {
-        apiVersion = "v1.1"
-        logger.SysLog("api_version not found, use default: " + apiVersion)
+    parts := strings.Split(modelName, "-")
+    if len(parts) == 2 {
+        apiVersion = parts[1]
+        return apiVersion
     }
-    domain := "general"
-    if apiVersion != "v1.1" {
-        domain += strings.Split(apiVersion, ".")[0]
+    apiVersion = c.GetString(common.ConfigKeyAPIVersion)
+    if apiVersion != "" {
+        return apiVersion
     }
+    apiVersion = "v1.1"
+    logger.SysLog("api_version not found, using default: " + apiVersion)
+    return apiVersion
+}
+
+// https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
+func apiVersion2domain(apiVersion string) string {
+    switch apiVersion {
+    case "v1.1":
+        return "general"
+    case "v2.1":
+        return "generalv2"
+    case "v3.1":
+        return "generalv3"
+    case "v3.5":
+        return "generalv3.5"
+    }
+    return "general" + apiVersion
+}
+
+func getXunfeiAuthUrl(c *gin.Context, apiKey string, apiSecret string, modelName string) (string, string) {
+    apiVersion := getAPIVersion(c, modelName)
+    domain := apiVersion2domain(apiVersion)
     authUrl := buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
     return domain, authUrl
 }
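Note: a standalone sketch of how a model name such as "SparkDesk-v3.5" now resolves to an API version, domain and websocket endpoint (mapping copied from apiVersion2domain above):

package main

import (
    "fmt"
    "strings"
)

func main() {
    model := "SparkDesk-v3.5"
    apiVersion := strings.Split(model, "-")[1] // "v3.5"
    domains := map[string]string{
        "v1.1": "general",
        "v2.1": "generalv2",
        "v3.1": "generalv3",
        "v3.5": "generalv3.5",
    }
    fmt.Println(domains[apiVersion])                               // generalv3.5
    fmt.Printf("wss://spark-api.xf-yun.com/%s/chat\n", apiVersion) // wss://spark-api.xf-yun.com/v3.5/chat
}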

View File

@@ -39,6 +39,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
     ratio := modelRatio * groupRatio
     // pre-consume quota
     promptTokens := getPromptTokens(textRequest, meta.Mode)
+    meta.PromptTokens = promptTokens
     preConsumedQuota, bizErr := preConsumeQuota(ctx, textRequest, promptTokens, ratio, meta)
     if bizErr != nil {
         logger.Warnf(ctx, "preConsumeQuota failed: %+v", *bizErr)

View File

@@ -1,2 +1,2 @@
 default
-berry
\ No newline at end of file
+berry

View File

@@ -227,9 +227,9 @@ export default function ChannelPage() {
           <Button onClick={testAllChannels} startIcon={<IconBrandSpeedtest width={'18px'} />}>
             测试启用渠道
           </Button>
-          <Button onClick={updateAllChannelsBalance} startIcon={<IconCoinYuan width={'18px'} />}>
-            更新启用余额
-          </Button>
+          {/*<Button onClick={updateAllChannelsBalance} startIcon={<IconCoinYuan width={'18px'} />}>*/}
+          {/*  更新启用余额*/}
+          {/*</Button>*/}
           <Button onClick={deleteAllDisabledChannels} startIcon={<IconHttpDelete width={'18px'} />}>
             删除禁用渠道
           </Button>

View File

@@ -94,7 +94,13 @@ const typeConfig = {
     other: "版本号",
   },
   input: {
-    models: ["SparkDesk"],
+    models: [
+      "SparkDesk",
+      'SparkDesk-v1.1',
+      'SparkDesk-v2.1',
+      'SparkDesk-v3.1',
+      'SparkDesk-v3.5'
+    ],
   },
   prompt: {
     key: "按照如下格式输入APPID|APISecret|APIKey",

View File

@@ -1,13 +1,13 @@
 #!/bin/sh
 
 version=$(cat VERSION)
-themes=$(cat THEMES)
-IFS=$'\n'
+pwd
 
-for theme in $themes; do
+while IFS= read -r theme; do
     echo "Building theme: $theme"
-    cd $theme
+    rm -r build/$theme
+    cd "$theme"
     npm install
     DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$version npm run build
     cd ..
-done
+done < THEMES

View File

@@ -522,8 +522,8 @@ const ChannelsTable = () => {
           <Button size='small' loading={loading} onClick={testAllChannels}>
             测试所有渠道
           </Button>
-          <Button size='small' onClick={updateAllChannelsBalance}
-                  loading={loading || updatingBalance}>更新已启用渠道余额</Button>
+          {/*<Button size='small' onClick={updateAllChannelsBalance}*/}
+          {/*        loading={loading || updatingBalance}>更新已启用渠道余额</Button>*/}
           <Popup
             trigger={
               <Button size='small' loading={loading}>

View File

@@ -82,7 +82,13 @@ const EditChannel = () => {
         localModels = ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];
         break;
       case 18:
-        localModels = ['SparkDesk'];
+        localModels = [
+          'SparkDesk',
+          'SparkDesk-v1.1',
+          'SparkDesk-v2.1',
+          'SparkDesk-v3.1',
+          'SparkDesk-v3.5'
+        ];
         break;
       case 19:
         localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];