Merge remote-tracking branch 'origin/upstream/main'

Laisky.Cai · 2024-07-23 01:14:23 +00:00 · commit 4c96688e7c
22 changed files with 352 additions and 145 deletions

View File

@@ -36,6 +36,8 @@ func relayHelper(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
 		fallthrough
 	case relaymode.AudioTranscription:
 		err = controller.RelayAudioHelper(c, relayMode)
+	case relaymode.Proxy:
+		err = controller.RelayProxyHelper(c, relayMode)
 	default:
 		err = controller.RelayTextHelper(c)
 	}
@@ -84,7 +86,7 @@ func Relay(c *gin.Context) {
 		channelId := c.GetInt(ctxkey.ChannelId)
 		lastFailedChannelId = channelId
 		channelName := c.GetString(ctxkey.ChannelName)
-		// bizErr is shared, should not run this function in goroutine to avoid race
+		// BUG: bizErr is in race condition
 		go processChannelRelayError(ctx, userId, channelId, channelName, bizErr)
 	}
@@ -92,6 +94,8 @@ func Relay(c *gin.Context) {
 	if bizErr.StatusCode == http.StatusTooManyRequests {
 		bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
 	}
+	// BUG: bizErr is in race condition
 	bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
 	c.JSON(bizErr.StatusCode, gin.H{
 		"error": bizErr.Error,

View File

@@ -144,6 +144,12 @@ func TokenAuth() func(c *gin.Context) {
 				return
 			}
 		}
+
+		// set channel id for proxy relay
+		if channelId := c.Param("channelid"); channelId != "" {
+			c.Set(ctxkey.SpecificChannelId, channelId)
+		}
+
 		c.Next()
 	}
 }

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/ollama"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/adaptor/palm"
+	"github.com/songquanpeng/one-api/relay/adaptor/proxy"
 	"github.com/songquanpeng/one-api/relay/adaptor/tencent"
 	"github.com/songquanpeng/one-api/relay/adaptor/vertexai"
 	"github.com/songquanpeng/one-api/relay/adaptor/xunfei"
@@ -58,6 +59,8 @@ func GetAdaptor(apiType int) adaptor.Adaptor {
 		return &deepl.Adaptor{}
 	case apitype.VertexAI:
 		return &vertexai.Adaptor{}
+	case apitype.Proxy:
+		return &proxy.Adaptor{}
 	}
 	return nil

View File

@@ -7,8 +7,12 @@ import (
 )

 func GetRequestURL(meta *meta.Meta) (string, error) {
-	if meta.Mode == relaymode.ChatCompletions {
+	switch meta.Mode {
+	case relaymode.ChatCompletions:
 		return fmt.Sprintf("%s/api/v3/chat/completions", meta.BaseURL), nil
+	case relaymode.Embeddings:
+		return fmt.Sprintf("%s/api/v3/embeddings", meta.BaseURL), nil
+	default:
 	}
 	return "", fmt.Errorf("unsupported relay mode %d for doubao", meta.Mode)
 }

View File

@@ -8,6 +8,7 @@ var ModelList = []string{
 	"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
 	"gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
 	"gpt-4o", "gpt-4o-2024-05-13",
+	"gpt-4o-mini", "gpt-4o-mini-2024-07-18",
 	"gpt-4-vision-preview",
 	"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
 	"text-curie-001", "text-babbage-001", "text-ada-001", "text-davinci-002", "text-davinci-003",

View File

@@ -110,7 +110,7 @@ func CountTokenMessages(messages []model.Message, model string) int {
 				if imageUrl["detail"] != nil {
 					detail = imageUrl["detail"].(string)
 				}
-				imageTokens, err := countImageTokens(url, detail)
+				imageTokens, err := countImageTokens(url, detail, model)
 				if err != nil {
 					logger.SysError("error counting image tokens: " + err.Error())
 				} else {
@@ -181,11 +181,15 @@ const (
 	lowDetailCost         = 85
 	highDetailCostPerTile = 170
 	additionalCost        = 85
+	// gpt-4o-mini costs more than other models
+	gpt4oMiniLowDetailCost  = 2833
+	gpt4oMiniHighDetailCost = 5667
+	gpt4oMiniAdditionalCost = 2833
 )

 // https://platform.openai.com/docs/guides/vision/calculating-costs
 // https://github.com/openai/openai-cookbook/blob/05e3f9be4c7a2ae7ecf029a7c32065b024730ebe/examples/How_to_count_tokens_with_tiktoken.ipynb
-func countImageTokens(url string, detail string) (_ int, err error) {
+func countImageTokens(url string, detail string, model string) (_ int, err error) {
 	var fetchSize = true
 	var width, height int
 	// Reference: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
@@ -219,6 +223,9 @@ func countImageTokens(url string, detail string) (_ int, err error) {
 	}
 	switch detail {
 	case "low":
+		if strings.HasPrefix(model, "gpt-4o-mini") {
+			return gpt4oMiniLowDetailCost, nil
+		}
 		return lowDetailCost, nil
 	case "high":
 		if fetchSize {
@@ -238,6 +245,9 @@ func countImageTokens(url string, detail string) (_ int, err error) {
 			height = int(float64(height) * ratio)
 		}
 		numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
+		if strings.HasPrefix(model, "gpt-4o-mini") {
+			return numSquares*gpt4oMiniHighDetailCost + gpt4oMiniAdditionalCost, nil
+		}
 		result := numSquares*highDetailCostPerTile + additionalCost
 		return result, nil
 	default:
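
The new constants are roughly 33.3x the base ones (2833 ≈ 33.3 × 85, 5667 ≈ 33.3 × 170): gpt-4o-mini bills far more tokens per image so the dollar cost stays in line with gpt-4o. A worked example of the high-detail branch, assuming an image the resize step above has already brought to 768×768:

package main

import (
	"fmt"
	"math"
)

const (
	highDetailCostPerTile   = 170 // gpt-4o family
	additionalCost          = 85
	gpt4oMiniHighDetailCost = 5667 // gpt-4o-mini
	gpt4oMiniAdditionalCost = 2833
)

func main() {
	width, height := 768.0, 768.0                              // post-resize dimensions (assumed)
	tiles := int(math.Ceil(width/512) * math.Ceil(height/512)) // 2 * 2 = 4

	fmt.Println(tiles*highDetailCostPerTile + additionalCost)            // 765 tokens for gpt-4o
	fmt.Println(tiles*gpt4oMiniHighDetailCost + gpt4oMiniAdditionalCost) // 25501 tokens for gpt-4o-mini
}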

View File

@@ -0,0 +1,89 @@
package proxy

import (
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/relay/adaptor"
	channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

var _ adaptor.Adaptor = new(Adaptor)

const channelName = "proxy"

type Adaptor struct{}

func (a *Adaptor) Init(meta *meta.Meta) {
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	return nil, errors.New("not implemented")
}

// DoResponse copies the upstream response headers, status code, and body back to the client.
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	for k, v := range resp.Header {
		for _, vv := range v {
			c.Writer.Header().Set(k, vv)
		}
	}

	c.Writer.WriteHeader(resp.StatusCode)
	if _, gerr := io.Copy(c.Writer, resp.Body); gerr != nil {
		return nil, &relaymodel.ErrorWithStatusCode{
			StatusCode: http.StatusInternalServerError,
			Error: relaymodel.Error{
				Message: gerr.Error(),
			},
		}
	}

	return nil, nil
}

func (a *Adaptor) GetModelList() (models []string) {
	return nil
}

func (a *Adaptor) GetChannelName() string {
	return channelName
}

// GetRequestURL removes the static prefix and returns the real request URL of the upstream service
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
	prefix := fmt.Sprintf("/v1/oneapi/proxy/%d", meta.ChannelId)
	return meta.BaseURL + strings.TrimPrefix(meta.RequestURLPath, prefix), nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
	for k, v := range c.Request.Header {
		req.Header.Set(k, v[0])
	}

	// remove unnecessary headers
	req.Header.Del("Host")
	req.Header.Del("Content-Length")
	req.Header.Del("Accept-Encoding")
	req.Header.Del("Connection")

	// set authorization header
	req.Header.Set("Authorization", meta.APIKey)

	return nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
	return nil, errors.Errorf("not implemented")
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
	return channelhelper.DoRequestHelper(a, c, meta, requestBody)
}
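
To make the URL rewrite concrete: GetRequestURL strips the static route prefix /v1/oneapi/proxy/<channelId> and appends whatever follows it to the channel's BaseURL. A sketch with a hypothetical channel id, BaseURL, and target path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	channelId := 42                       // hypothetical channel id
	baseURL := "https://internal.example" // hypothetical channel BaseURL
	requestPath := "/v1/oneapi/proxy/42/v2/images/generations"

	prefix := fmt.Sprintf("/v1/oneapi/proxy/%d", channelId)
	fmt.Println(baseURL + strings.TrimPrefix(requestPath, prefix))
	// https://internal.example/v2/images/generations
}

Note that SetupRequestHeader then replaces the incoming Authorization header with the channel's API key, so the caller's one-api token is never forwarded to the upstream.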

View File

@@ -18,6 +18,7 @@ const (
 	Cloudflare
 	DeepL
 	VertexAI
+	Proxy
 	Dummy // this one is only for count, do not add any channel after this
 )

View File

@@ -28,15 +28,17 @@ var ModelRatio = map[string]float64{
 	"gpt-4-32k":              30,
 	"gpt-4-32k-0314":         30,
 	"gpt-4-32k-0613":         30,
 	"gpt-4-1106-preview":     5,     // $0.01 / 1K tokens
 	"gpt-4-0125-preview":     5,     // $0.01 / 1K tokens
 	"gpt-4-turbo-preview":    5,     // $0.01 / 1K tokens
 	"gpt-4-turbo":            5,     // $0.01 / 1K tokens
 	"gpt-4-turbo-2024-04-09": 5,     // $0.01 / 1K tokens
 	"gpt-4o":                 2.5,   // $0.005 / 1K tokens
 	"gpt-4o-2024-05-13":      2.5,   // $0.005 / 1K tokens
+	"gpt-4o-mini":            0.075, // $0.00015 / 1K tokens
+	"gpt-4o-mini-2024-07-18": 0.075, // $0.00015 / 1K tokens
 	"gpt-4-vision-preview":   5,     // $0.01 / 1K tokens
 	"gpt-3.5-turbo":          0.25,  // $0.0005 / 1K tokens
 	"gpt-3.5-turbo-0301":     0.75,
 	"gpt-3.5-turbo-0613":     0.75,
 	"gpt-3.5-turbo-16k":      1.5, // $0.003 / 1K tokens
@@ -308,6 +310,9 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		return 4.0 / 3.0
 	}
 	if strings.HasPrefix(name, "gpt-4") {
+		if strings.HasPrefix(name, "gpt-4o-mini") {
+			return 4
+		}
 		if strings.HasPrefix(name, "gpt-4-turbo") ||
 			strings.HasPrefix(name, "gpt-4o") ||
 			strings.HasSuffix(name, "preview") {
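
Taken together with the ModelRatio entries above, these numbers line up with OpenAI's published gpt-4o-mini pricing: the inline comments imply a ratio of 1 equals $0.002 per 1K tokens, so 0.075 gives $0.00015 per 1K prompt tokens ($0.15/1M), and the completion ratio of 4 gives $0.0006 per 1K completion tokens ($0.60/1M). A quick check of that arithmetic:

package main

import "fmt"

func main() {
	const usdPer1kAtRatio1 = 0.002 // implied by the comments in ModelRatio

	modelRatio := 0.075    // gpt-4o-mini prompt ratio
	completionRatio := 4.0 // gpt-4o-mini completion ratio

	prompt := modelRatio * usdPer1kAtRatio1
	completion := prompt * completionRatio

	fmt.Printf("prompt: $%g/1K, completion: $%g/1K\n", prompt, completion)
	// prompt: $0.00015/1K, completion: $0.0006/1K
}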

View File

@@ -44,5 +44,6 @@ const (
 	Doubao
 	Novita
 	VertextAI
+	Proxy
 	Dummy
 )

View File

@@ -37,6 +37,8 @@ func ToAPIType(channelType int) int {
 		apiType = apitype.DeepL
 	case VertextAI:
 		apiType = apitype.VertexAI
+	case Proxy:
+		apiType = apitype.Proxy
 	}
 	return apiType

View File

@@ -44,6 +44,7 @@ var ChannelBaseURLs = []string{
 	"https://ark.cn-beijing.volces.com", // 40
 	"https://api.novita.ai/v3/openai",   // 41
 	"",                                  // 42
+	"",                                  // 43
 }

 func init() {
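
The empty string at index 43 leaves the new Proxy channel without a default base URL by design: as the Meta comment later in this diff notes, its BaseURL is the proxy URL set in the channel config.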

relay/controller/proxy.go (new file, 41 additions)
View File

@@ -0,0 +1,41 @@
// Package controller is a package for handling the relay controller
package controller

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	"github.com/songquanpeng/one-api/relay/meta"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

// RelayProxyHelper is a helper function to proxy the request to the upstream service
func RelayProxyHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
	ctx := c.Request.Context()
	meta := meta.GetByContext(c)

	adaptor := relay.GetAdaptor(meta.APIType)
	if adaptor == nil {
		return openai.ErrorWrapper(fmt.Errorf("invalid api type: %d", meta.APIType), "invalid_api_type", http.StatusBadRequest)
	}
	adaptor.Init(meta)

	resp, err := adaptor.DoRequest(c, meta, c.Request.Body)
	if err != nil {
		logger.Errorf(ctx, "DoRequest failed: %s", err.Error())
		return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}

	// do response
	_, respErr := adaptor.DoResponse(c, resp, meta)
	if respErr != nil {
		logger.Errorf(ctx, "respErr is not nil: %+v", respErr)
		return respErr
	}

	return nil
}

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/model"
 	"github.com/songquanpeng/one-api/relay"
+	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/apitype"
 	"github.com/songquanpeng/one-api/relay/billing"
@@ -33,9 +34,8 @@ func RelayTextHelper(c *gin.Context) *relaymodel.ErrorWithStatusCode {
 	meta.IsStream = textRequest.Stream

 	// map model name
-	var isModelMapped bool
 	meta.OriginModelName = textRequest.Model
-	textRequest.Model, isModelMapped = getMappedModelName(textRequest.Model, meta.ModelMapping)
+	textRequest.Model, _ = getMappedModelName(textRequest.Model, meta.ModelMapping)
 	meta.ActualModelName = textRequest.Model
 	// get model ratio & group ratio
 	modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType)
@@ -59,30 +59,9 @@ func RelayTextHelper(c *gin.Context) *relaymodel.ErrorWithStatusCode {
 	adaptor.Init(meta)

 	// get request body
-	var requestBody io.Reader
-	if meta.APIType == apitype.OpenAI {
-		// no need to convert request for openai
-		shouldResetRequestBody := isModelMapped || meta.ChannelType == channeltype.Baichuan // frequency_penalty 0 is not acceptable for baichuan
-		if shouldResetRequestBody {
-			jsonStr, err := json.Marshal(textRequest)
-			if err != nil {
-				return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
-			}
-			requestBody = bytes.NewBuffer(jsonStr)
-		} else {
-			requestBody = c.Request.Body
-		}
-	} else {
-		convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
-		if err != nil {
-			return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
-		}
-		jsonData, err := json.Marshal(convertedRequest)
-		if err != nil {
-			return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
-		}
-		logger.Debugf(ctx, "converted request: \n%s", string(jsonData))
-		requestBody = bytes.NewBuffer(jsonData)
+	requestBody, err := getRequestBody(c, meta, textRequest, adaptor)
+	if err != nil {
+		return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
 	}

 	// for debug
@@ -123,3 +102,26 @@ func RelayTextHelper(c *gin.Context) *relaymodel.ErrorWithStatusCode {
 	return nil
 }
+
+func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest, adaptor adaptor.Adaptor) (io.Reader, error) {
+	if meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
+		// no need to convert request for openai
+		return c.Request.Body, nil
+	}
+
+	// get request body
+	var requestBody io.Reader
+	convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
+	if err != nil {
+		logger.Debugf(c.Request.Context(), "converted request failed: %s\n", err.Error())
+		return nil, err
+	}
+
+	jsonData, err := json.Marshal(convertedRequest)
+	if err != nil {
+		logger.Debugf(c.Request.Context(), "converted request json_marshal_failed: %s\n", err.Error())
+		return nil, err
+	}
+	logger.Debugf(c.Request.Context(), "converted request: \n%s", string(jsonData))
+	requestBody = bytes.NewBuffer(jsonData)
+	return requestBody, nil
+}
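
The extracted getRequestBody keeps the old OpenAI fast path but re-derives the condition from meta instead of the removed isModelMapped flag: the client body is streamed through verbatim only when nothing has to change, i.e. the model name was not remapped and the channel is not Baichuan (which rejects frequency_penalty 0). A sketch of just that predicate, with hypothetical constant values standing in for the real apitype and channeltype packages:

package main

import "fmt"

type Meta struct {
	APIType         int
	ChannelType     int
	OriginModelName string
	ActualModelName string
}

// hypothetical values; the real ones live in relay/apitype and relay/channeltype
const (
	apiTypeOpenAI       = 0
	channelTypeBaichuan = 26
)

func needsConversion(m Meta) bool {
	// passthrough only when the body can be forwarded untouched
	return !(m.APIType == apiTypeOpenAI &&
		m.OriginModelName == m.ActualModelName &&
		m.ChannelType != channelTypeBaichuan)
}

func main() {
	fmt.Println(needsConversion(Meta{apiTypeOpenAI, 1, "gpt-4o", "gpt-4o"}))      // false: stream through
	fmt.Println(needsConversion(Meta{apiTypeOpenAI, 1, "gpt-4o", "gpt-4o-mini"})) // true: model was remapped
}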

View File

@@ -19,11 +19,12 @@ type Meta struct {
 	UserId       int
 	Group        string
 	ModelMapping map[string]string
+	// BaseURL is the proxy url set in the channel config
 	BaseURL  string
 	APIKey   string
 	APIType  int
 	Config   model.ChannelConfig
 	IsStream bool
 	// OriginModelName is the model name from the raw user request
 	OriginModelName string
 	// ActualModelName is the model name after mapping

View File

@@ -11,5 +11,7 @@ const (
 	AudioSpeech
 	AudioTranscription
 	AudioTranslation
+	// Proxy is a special relay mode for proxying requests to custom upstream
+	Proxy
 	ImagesEdits
 )

View File

@@ -26,6 +26,8 @@ func GetByPath(path string) int {
 		relayMode = AudioTranslation
 	} else if strings.HasPrefix(path, "/v1/images/edits") {
 		relayMode = ImagesEdits
+	} else if strings.HasPrefix(path, "/v1/oneapi/proxy") {
+		relayMode = Proxy
 	}
 	return relayMode

View File

@@ -19,6 +19,7 @@ func SetRelayRouter(router *gin.Engine) {
 	relayV1Router := router.Group("/v1")
 	relayV1Router.Use(middleware.RelayPanicRecover(), middleware.TokenAuth(), middleware.Distribute())
 	{
+		relayV1Router.Any("/oneapi/proxy/:channelid/*target", controller.Relay)
 		relayV1Router.POST("/completions", controller.Relay)
 		relayV1Router.POST("/chat/completions", controller.Relay)
 		relayV1Router.POST("/edits", controller.Relay)

View File

@@ -1,10 +1,13 @@
 export const CHANNEL_OPTIONS = [
   { key: 1, text: 'OpenAI', value: 1, color: 'green' },
   { key: 14, text: 'Anthropic Claude', value: 14, color: 'black' },
+  { key: 33, text: 'AWS', value: 33, color: 'black' },
   { key: 3, text: 'Azure OpenAI', value: 3, color: 'olive' },
   { key: 11, text: 'Google PaLM2', value: 11, color: 'orange' },
   { key: 24, text: 'Google Gemini', value: 24, color: 'orange' },
   { key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
+  { key: 41, text: 'Novita', value: 41, color: 'purple' },
+  { key: 40, text: '字节跳动豆包', value: 40, color: 'blue' },
   { key: 15, text: '百度文心千帆', value: 15, color: 'blue' },
   { key: 17, text: '阿里通义千问', value: 17, color: 'orange' },
   { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' },
@@ -17,6 +20,15 @@ export const CHANNEL_OPTIONS = [
   { key: 29, text: 'Groq', value: 29, color: 'orange' },
   { key: 30, text: 'Ollama', value: 30, color: 'black' },
   { key: 31, text: '零一万物', value: 31, color: 'green' },
+  { key: 32, text: '阶跃星辰', value: 32, color: 'blue' },
+  { key: 34, text: 'Coze', value: 34, color: 'blue' },
+  { key: 35, text: 'Cohere', value: 35, color: 'blue' },
+  { key: 36, text: 'DeepSeek', value: 36, color: 'black' },
+  { key: 37, text: 'Cloudflare', value: 37, color: 'orange' },
+  { key: 38, text: 'DeepL', value: 38, color: 'black' },
+  { key: 39, text: 'together.ai', value: 39, color: 'blue' },
+  { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
+  { key: 43, text: 'Proxy', value: 43, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库AI Proxy', value: 21, color: 'purple' },

View File

@@ -167,6 +167,12 @@ export const CHANNEL_OPTIONS = {
     value: 42,
     color: 'primary'
   },
+  43: {
+    key: 43,
+    text: 'Proxy',
+    value: 43,
+    color: 'primary'
+  },
   41: {
     key: 41,
     text: 'Novita',

View File

@@ -1,44 +1,45 @@ (whitespace-only reformat of every entry, plus one addition)
 export const CHANNEL_OPTIONS = [
   { key: 1, text: 'OpenAI', value: 1, color: 'green' },
   { key: 14, text: 'Anthropic Claude', value: 14, color: 'black' },
   { key: 33, text: 'AWS', value: 33, color: 'black' },
   { key: 3, text: 'Azure OpenAI', value: 3, color: 'olive' },
   { key: 11, text: 'Google PaLM2', value: 11, color: 'orange' },
   { key: 24, text: 'Google Gemini', value: 24, color: 'orange' },
   { key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
   { key: 41, text: 'Novita', value: 41, color: 'purple' },
   { key: 40, text: '字节跳动豆包', value: 40, color: 'blue' },
   { key: 15, text: '百度文心千帆', value: 15, color: 'blue' },
   { key: 17, text: '阿里通义千问', value: 17, color: 'orange' },
   { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' },
   { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' },
   { key: 19, text: '360 智脑', value: 19, color: 'blue' },
   { key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
   { key: 23, text: '腾讯混元', value: 23, color: 'teal' },
   { key: 26, text: '百川大模型', value: 26, color: 'orange' },
   { key: 27, text: 'MiniMax', value: 27, color: 'red' },
   { key: 29, text: 'Groq', value: 29, color: 'orange' },
   { key: 30, text: 'Ollama', value: 30, color: 'black' },
   { key: 31, text: '零一万物', value: 31, color: 'green' },
   { key: 32, text: '阶跃星辰', value: 32, color: 'blue' },
   { key: 34, text: 'Coze', value: 34, color: 'blue' },
   { key: 35, text: 'Cohere', value: 35, color: 'blue' },
   { key: 36, text: 'DeepSeek', value: 36, color: 'black' },
   { key: 37, text: 'Cloudflare', value: 37, color: 'orange' },
   { key: 38, text: 'DeepL', value: 38, color: 'black' },
   { key: 39, text: 'together.ai', value: 39, color: 'blue' },
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
+  { key: 43, text: 'Proxy', value: 43, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库AI Proxy', value: 21, color: 'purple' },
   { key: 20, text: '代理OpenRouter', value: 20, color: 'black' },
   { key: 2, text: '代理API2D', value: 2, color: 'blue' },
   { key: 5, text: '代理OpenAI-SB', value: 5, color: 'brown' },
   { key: 7, text: '代理OhMyGPT', value: 7, color: 'purple' },
   { key: 10, text: '代理AI Proxy', value: 10, color: 'purple' },
   { key: 4, text: '代理CloseAI', value: 4, color: 'teal' },
   { key: 6, text: '代理OpenAI Max', value: 6, color: 'violet' },
   { key: 9, text: '代理AI.LS', value: 9, color: 'yellow' },
   { key: 12, text: '代理API2GPT', value: 12, color: 'blue' },
   { key: 13, text: '代理AIGC2D', value: 13, color: 'purple' }
 ];

View File

@@ -170,7 +170,7 @@ const EditChannel = () => {
       showInfo('请填写渠道名称和渠道密钥!');
       return;
     }
-    if (inputs.models.length === 0) {
+    if (inputs.type !== 43 && inputs.models.length === 0) {
       showInfo('请至少选择一个模型!');
       return;
     }
@@ -370,63 +370,75 @@ const EditChannel = () => {
             </Message>
           )
         }
+        {
+          inputs.type !== 43 && (
             <Form.Field>
               <Form.Dropdown
                 label='模型'
                 placeholder={'请选择该渠道所支持的模型'}
                 name='models'
                 required
                 fluid
                 multiple
                 search
                 onLabelClick={(e, { value }) => {
                   copy(value).then();
                 }}
                 selection
                 onChange={handleInputChange}
                 value={inputs.models}
                 autoComplete='new-password'
                 options={modelOptions}
               />
             </Form.Field>
+          )
+        }
+        {
+          inputs.type !== 43 && (
             <div style={{ lineHeight: '40px', marginBottom: '12px' }}>
               <Button type={'button'} onClick={() => {
                 handleInputChange(null, { name: 'models', value: basicModels });
               }}>填入相关模型</Button>
               <Button type={'button'} onClick={() => {
                 handleInputChange(null, { name: 'models', value: fullModels });
               }}>填入所有模型</Button>
               <Button type={'button'} onClick={() => {
                 handleInputChange(null, { name: 'models', value: [] });
               }}>清除所有模型</Button>
               <Input
                 action={
                   <Button type={'button'} onClick={addCustomModel}>填入</Button>
                 }
                 placeholder='输入自定义模型名称'
                 value={customModel}
                 onChange={(e, { value }) => {
                   setCustomModel(value);
                 }}
                 onKeyDown={(e) => {
                   if (e.key === 'Enter') {
                     addCustomModel();
                     e.preventDefault();
                   }
                 }}
               />
             </div>
+          )
+        }
+        {
+          inputs.type !== 43 && (
             <Form.Field>
               <Form.TextArea
                 label='模型重定向'
                 placeholder={`此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,例如:\n${JSON.stringify(MODEL_MAPPING_EXAMPLE, null, 2)}`}
                 name='model_mapping'
                 onChange={handleInputChange}
                 value={inputs.model_mapping}
                 style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
                 autoComplete='new-password'
               />
             </Form.Field>
+          )
+        }
         {
           inputs.type === 33 && (
             <Form.Field>