Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-04 15:53:42 +08:00)

Compare commits: v0.6.0-alp ... v0.6.0-alp

12 Commits
| SHA1 |
|---|
| b747cdbc6f |
| 6b27d6659a |
| dc5b781191 |
| c880b4a9a3 |
| 565ea58e68 |
| f141a37a9e |
| 5b78886ad3 |
| 87c7c4f0e6 |
| 4c4a873890 |
| 0664bdfda1 |
| 32387d9c20 |
| bd888f2eb7 |
.github/workflows/linux-release.yml (2 changes, vendored)

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |

.github/workflows/macos-release.yml (2 changes, vendored)

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |

.github/workflows/windows-release.yml (2 changes, vendored)

@@ -26,7 +26,7 @@ jobs:
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |

@@ -23,7 +23,7 @@ ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
 COPY --from=builder /web/build ./web/build
-RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
+RUN go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
 
 FROM alpine
 
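Note on the -X flag above: the linker sets a string variable addressed by its full import path, so once the module is named github.com/songquanpeng/one-api the short one-api/common.Version path no longer matches anything and the stamped version silently stays at its default. A minimal sketch of the kind of variable the flag targets (the declaration and default value below are assumptions, not copied from the repository):

```go
// Hypothetical excerpt of the common package, shown only to illustrate
// what -X 'github.com/songquanpeng/one-api/common.Version=...' overrides.
package common

// Version is replaced at build time by the -ldflags "-X ..." setting above.
// If the -X path does not exactly match this package's import path, the
// linker leaves the default in place without reporting an error.
var Version = "v0.0.0" // assumed default; the repository's actual default may differ
```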
@@ -8,12 +8,24 @@ import (
 	"strings"
 )
 
-func UnmarshalBodyReusable(c *gin.Context, v any) error {
+const KeyRequestBody = "key_request_body"
+
+func GetRequestBody(c *gin.Context) ([]byte, error) {
+	requestBody, _ := c.Get(KeyRequestBody)
+	if requestBody != nil {
+		return requestBody.([]byte), nil
+	}
 	requestBody, err := io.ReadAll(c.Request.Body)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	err = c.Request.Body.Close()
+	_ = c.Request.Body.Close()
+	c.Set(KeyRequestBody, requestBody)
+	return requestBody.([]byte), nil
+}
+
+func UnmarshalBodyReusable(c *gin.Context, v any) error {
+	requestBody, err := GetRequestBody(c)
 	if err != nil {
 		return err
 	}

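For context on the hunk above: c.Request.Body can only be drained once, so GetRequestBody caches the raw bytes in the gin context under KeyRequestBody and later callers reuse them. A minimal sketch of a handler that relies on this (handler name and fields are made up; the exact content types UnmarshalBodyReusable accepts are not shown in this diff):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
)

// exampleHandler parses the same JSON body twice. The first call drains
// c.Request.Body and caches the bytes under common.KeyRequestBody; the
// second call is served from that cache instead of the exhausted reader.
func exampleHandler(c *gin.Context) {
	var probe struct {
		Model string `json:"model"`
	}
	if err := common.UnmarshalBodyReusable(c, &probe); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	var full map[string]any
	if err := common.UnmarshalBodyReusable(c, &full); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"model": probe.Model, "fields": len(full)})
}

func main() {
	r := gin.Default()
	r.POST("/probe", exampleHandler)
	_ = r.Run(":8080")
}
```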
@@ -137,6 +137,7 @@ func GetUUID() string {
 }
 
 const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const keyNumbers = "0123456789"
 
 func init() {
 	rand.Seed(time.Now().UnixNano())

@@ -168,6 +169,15 @@ func GetRandomString(length int) string {
 	return string(key)
 }
 
+func GetRandomNumberString(length int) string {
+	rand.Seed(time.Now().UnixNano())
+	key := make([]byte, length)
+	for i := 0; i < length; i++ {
+		key[i] = keyNumbers[rand.Intn(len(keyNumbers))]
+	}
+	return string(key)
+}
+
 func GetTimestamp() int64 {
 	return time.Now().Unix()
 }

@@ -1,23 +1,27 @@
 package controller
 
 import (
+	"bytes"
+	"context"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/middleware"
+	dbmodel "github.com/songquanpeng/one-api/model"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/controller"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
-	"strconv"
 )
 
 // https://platform.openai.com/docs/api-reference/chat
 
-func Relay(c *gin.Context) {
-	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
 	var err *model.ErrorWithStatusCode
 	switch relayMode {
 	case constant.RelayModeImagesGenerations:

@@ -31,32 +35,85 @@ func Relay(c *gin.Context) {
 	default:
 		err = controller.RelayTextHelper(c)
 	}
-	if err != nil {
-		requestId := c.GetString(logger.RequestIdKey)
-		retryTimesStr := c.Query("retry")
-		retryTimes, _ := strconv.Atoi(retryTimesStr)
-		if retryTimesStr == "" {
-			retryTimes = config.RetryTimes
-		}
-		if retryTimes > 0 {
-			c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
-		} else {
-			if err.StatusCode == http.StatusTooManyRequests {
-				err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
-			}
-			err.Error.Message = helper.MessageWithRequestId(err.Error.Message, requestId)
-			c.JSON(err.StatusCode, gin.H{
-				"error": err.Error,
-			})
-		}
-		channelId := c.GetInt("channel_id")
-		logger.Error(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
-		// https://platform.openai.com/docs/guides/error-codes/api-errors
-		if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
-			channelId := c.GetInt("channel_id")
-			channelName := c.GetString("channel_name")
-			disableChannel(channelId, channelName, err.Message)
-		}
-	}
+	return err
 }
+
+func Relay(c *gin.Context) {
+	ctx := c.Request.Context()
+	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+	bizErr := relay(c, relayMode)
+	if bizErr == nil {
+		return
+	}
+	channelId := c.GetInt("channel_id")
+	lastFailedChannelId := channelId
+	channelName := c.GetString("channel_name")
+	group := c.GetString("group")
+	originalModel := c.GetString("original_model")
+	go processChannelRelayError(ctx, channelId, channelName, bizErr)
+	requestId := c.GetString(logger.RequestIdKey)
+	retryTimes := config.RetryTimes
+	if !shouldRetry(c, bizErr.StatusCode) {
+		logger.Errorf(ctx, "relay error happen, status code is %d, won't retry in this case", bizErr.StatusCode)
+		retryTimes = 0
+	}
+	for i := retryTimes; i > 0; i-- {
+		channel, err := dbmodel.CacheGetRandomSatisfiedChannel(group, originalModel)
+		if err != nil {
+			logger.Errorf(ctx, "CacheGetRandomSatisfiedChannel failed: %w", err)
+			break
+		}
+		logger.Infof(ctx, "using channel #%d to retry (remain times %d)", channel.Id, i)
+		if channel.Id == lastFailedChannelId {
+			continue
+		}
+		middleware.SetupContextForSelectedChannel(c, channel, originalModel)
+		requestBody, err := common.GetRequestBody(c)
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
+		bizErr = relay(c, relayMode)
+		if bizErr == nil {
+			return
+		}
+		channelId := c.GetInt("channel_id")
+		lastFailedChannelId = channelId
+		channelName := c.GetString("channel_name")
+		go processChannelRelayError(ctx, channelId, channelName, bizErr)
+	}
+	if bizErr != nil {
+		if bizErr.StatusCode == http.StatusTooManyRequests {
+			bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
+		}
+		bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
+		c.JSON(bizErr.StatusCode, gin.H{
+			"error": bizErr.Error,
+		})
+	}
+}
+
+func shouldRetry(c *gin.Context, statusCode int) bool {
+	if _, ok := c.Get("specific_channel_id"); ok {
+		return false
+	}
+	if statusCode == http.StatusTooManyRequests {
+		return true
+	}
+	if statusCode/100 == 5 {
+		return true
+	}
+	if statusCode == http.StatusBadRequest {
+		return false
+	}
+	if statusCode/100 == 2 {
+		return false
+	}
+	return true
+}
+
+func processChannelRelayError(ctx context.Context, channelId int, channelName string, err *model.ErrorWithStatusCode) {
+	logger.Errorf(ctx, "relay error (channel #%d): %s", channelId, err.Message)
+	// https://platform.openai.com/docs/guides/error-codes/api-errors
+	if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
+		disableChannel(channelId, channelName, err.Message)
+	}
+}

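The retry policy introduced above is easiest to read as a decision table. The sketch below is a standalone rewrite of shouldRetry for illustration only (it is not repository code) and assumes no "specific_channel_id" is pinned in the gin context:

```go
package main

import (
	"fmt"
	"net/http"
)

// retryable mirrors the decision order of shouldRetry in the diff above.
func retryable(statusCode int) bool {
	switch {
	case statusCode == http.StatusTooManyRequests:
		return true // 429: another channel may have spare capacity
	case statusCode/100 == 5:
		return true // upstream failure: worth trying a different channel
	case statusCode == http.StatusBadRequest:
		return false // the request itself is malformed; retrying will not help
	case statusCode/100 == 2:
		return false // success is never retried
	default:
		return true
	}
}

func main() {
	for _, code := range []int{429, 502, 400, 200, 401} {
		fmt.Printf("status %d -> retry: %v\n", code, retryable(code))
	}
}
```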
@@ -456,6 +456,7 @@
   "已绑定的邮箱账户": "Email Account Bound",
   "用户信息更新成功!": "User information updated successfully!",
   "模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f",
+  "模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f": "model rate %.2f, group rate %.2f, completion rate %.2f",
   "使用明细(总消耗额度:{renderQuota(stat.quota)})": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
   "用户名称": "User Name",
   "令牌名称": "Token Name",

@@ -108,7 +108,7 @@ func TokenAuth() func(c *gin.Context) {
 		c.Set("token_name", token.Name)
 		if len(parts) > 1 {
 			if model.IsAdmin(token.UserId) {
-				c.Set("channelId", parts[1])
+				c.Set("specific_channel_id", parts[1])
 			} else {
 				abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道")
 				return

@@ -21,8 +21,9 @@ func Distribute() func(c *gin.Context) {
 		userId := c.GetInt("id")
 		userGroup, _ := model.CacheGetUserGroup(userId)
 		c.Set("group", userGroup)
+		var requestModel string
 		var channel *model.Channel
-		channelId, ok := c.Get("channelId")
+		channelId, ok := c.Get("specific_channel_id")
 		if ok {
 			id, err := strconv.Atoi(channelId.(string))
 			if err != nil {

@@ -66,6 +67,7 @@ func Distribute() func(c *gin.Context) {
 					modelRequest.Model = "whisper-1"
 				}
 			}
+			requestModel = modelRequest.Model
 			channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
 			if err != nil {
 				message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)

@@ -77,29 +79,34 @@ func Distribute() func(c *gin.Context) {
 				return
 			}
 		}
-		c.Set("channel", channel.Type)
-		c.Set("channel_id", channel.Id)
-		c.Set("channel_name", channel.Name)
-		c.Set("model_mapping", channel.GetModelMapping())
-		c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
-		c.Set("base_url", channel.GetBaseURL())
-		// this is for backward compatibility
-		switch channel.Type {
-		case common.ChannelTypeAzure:
-			c.Set(common.ConfigKeyAPIVersion, channel.Other)
-		case common.ChannelTypeXunfei:
-			c.Set(common.ConfigKeyAPIVersion, channel.Other)
-		case common.ChannelTypeGemini:
-			c.Set(common.ConfigKeyAPIVersion, channel.Other)
-		case common.ChannelTypeAIProxyLibrary:
-			c.Set(common.ConfigKeyLibraryID, channel.Other)
-		case common.ChannelTypeAli:
-			c.Set(common.ConfigKeyPlugin, channel.Other)
-		}
-		cfg, _ := channel.LoadConfig()
-		for k, v := range cfg {
-			c.Set(common.ConfigKeyPrefix+k, v)
-		}
+		SetupContextForSelectedChannel(c, channel, requestModel)
 		c.Next()
 	}
 }
+
+func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
+	c.Set("channel", channel.Type)
+	c.Set("channel_id", channel.Id)
+	c.Set("channel_name", channel.Name)
+	c.Set("model_mapping", channel.GetModelMapping())
+	c.Set("original_model", modelName) // for retry
+	c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
+	c.Set("base_url", channel.GetBaseURL())
+	// this is for backward compatibility
+	switch channel.Type {
+	case common.ChannelTypeAzure:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeXunfei:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeGemini:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeAIProxyLibrary:
+		c.Set(common.ConfigKeyLibraryID, channel.Other)
+	case common.ChannelTypeAli:
+		c.Set(common.ConfigKeyPlugin, channel.Other)
+	}
+	cfg, _ := channel.LoadConfig()
+	for k, v := range cfg {
+		c.Set(common.ConfigKeyPrefix+k, v)
+	}
+}

@@ -9,7 +9,7 @@ import (
 
 func RequestId() func(c *gin.Context) {
 	return func(c *gin.Context) {
-		id := helper.GetTimeString() + helper.GetRandomString(8)
+		id := helper.GetTimeString() + helper.GetRandomNumberString(8)
 		c.Set(logger.RequestIdKey, id)
 		ctx := context.WithValue(c.Request.Context(), logger.RequestIdKey, id)
 		c.Request = c.Request.WithContext(ctx)

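With this change a request ID becomes a time prefix plus eight random decimal digits rather than eight mixed-case characters. A rough standalone approximation (the exact layout produced by helper.GetTimeString is not shown in this diff and is assumed below):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// newRequestId approximates helper.GetTimeString() + helper.GetRandomNumberString(8):
// a timestamp prefix followed by eight random digits. The timestamp layout here
// is an assumption, not the repository's actual format.
func newRequestId() string {
	const digits = "0123456789"
	suffix := make([]byte, 8)
	for i := range suffix {
		suffix[i] = digits[rand.Intn(len(digits))]
	}
	return time.Now().Format("20060102150405") + string(suffix)
}

func main() {
	fmt.Println(newRequestId()) // e.g. 2024020115040512345678
}
```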
@@ -94,7 +94,7 @@ func CacheUpdateUserQuota(id int) error {
 	if !common.RedisEnabled {
 		return nil
 	}
-	quota, err := GetUserQuota(id)
+	quota, err := CacheGetUserQuota(id)
 	if err != nil {
 		return err
 	}

@@ -53,7 +53,7 @@ func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextRespon
 		FinishReason: "stop",
 	}
 	fullTextResponse := openai.TextResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Choices: []openai.TextResponseChoice{choice},

@@ -66,7 +66,7 @@ func documentsAIProxyLibrary(documents []LibraryDocument) *openai.ChatCompletion
 	choice.Delta.Content = aiProxyDocuments2Markdown(documents)
 	choice.FinishReason = &constant.StopFinishReason
 	return &openai.ChatCompletionsStreamResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   "",

@@ -78,7 +78,7 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = response.Content
 	return &openai.ChatCompletionsStreamResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   response.Model,

@@ -118,8 +118,10 @@ type ImageResponse struct {
 }
 
 type ChatCompletionsStreamResponseChoice struct {
+	Index int `json:"index"`
 	Delta struct {
 		Content string `json:"content"`
+		Role    string `json:"role,omitempty"`
 	} `json:"delta"`
 	FinishReason *string `json:"finish_reason,omitempty"`
 }

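Because the new Role field carries omitempty, a stream delta only serializes "role" when it is set, which in OpenAI-style streams is typically just the first chunk. A small sketch using a local copy of the struct to show the resulting JSON:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// streamChoice is a local copy of ChatCompletionsStreamResponseChoice,
// reproduced here only to demonstrate the effect of the omitempty tags.
type streamChoice struct {
	Index int `json:"index"`
	Delta struct {
		Content string `json:"content"`
		Role    string `json:"role,omitempty"`
	} `json:"delta"`
	FinishReason *string `json:"finish_reason,omitempty"`
}

func main() {
	var first, later streamChoice
	first.Delta.Role = "assistant"
	first.Delta.Content = "Hel"
	later.Delta.Content = "lo"

	a, _ := json.Marshal(first)
	b, _ := json.Marshal(later)
	fmt.Println(string(a)) // {"index":0,"delta":{"content":"Hel","role":"assistant"}}
	fmt.Println(string(b)) // {"index":0,"delta":{"content":"lo"}}
}
```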
@@ -70,6 +70,7 @@ func responseXunfei2OpenAI(response *ChatResponse) *openai.TextResponse {
 		FinishReason: constant.StopFinishReason,
 	}
 	fullTextResponse := openai.TextResponse{
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Choices: []openai.TextResponseChoice{choice},

@@ -92,6 +93,7 @@ func streamResponseXunfei2OpenAI(xunfeiResponse *ChatResponse) *openai.ChatCompl
 		choice.FinishReason = &constant.StopFinishReason
 	}
 	response := openai.ChatCompletionsStreamResponse{
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   "SparkDesk",

@@ -39,6 +39,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 	ratio := modelRatio * groupRatio
 	// pre-consume quota
 	promptTokens := getPromptTokens(textRequest, meta.Mode)
+	meta.PromptTokens = promptTokens
 	preConsumedQuota, bizErr := preConsumeQuota(ctx, textRequest, promptTokens, ratio, meta)
 	if bizErr != nil {
 		logger.Warnf(ctx, "preConsumeQuota failed: %+v", *bizErr)

web/build.sh (10 changes)

@@ -1,13 +1,13 @@
 #!/bin/sh
 
 version=$(cat VERSION)
-themes=$(cat THEMES)
-IFS=$'\n'
-pwd
 
-for theme in $themes; do
+while IFS= read -r theme; do
     echo "Building theme: $theme"
-    cd $theme
+    rm -r build/$theme
+    cd "$theme"
     npm install
     DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$version npm run build
     cd ..
-done
+done < THEMES