Compare commits


9 Commits

Author SHA1 Message Date
Martial BE  fc284cc1f0  🐛 fix return to first page after refresh  2023-12-29 17:02:09 +08:00
Martial BE  9c0a49b97a  add custom test model  2023-12-29 16:23:25 +08:00
Martial BE  61c47a3b08  🎨 Support Baichuan2  2023-12-29 15:23:05 +08:00
Martial BE  c5aa59e297  🐛 fix xunfei request error  2023-12-29 11:32:43 +08:00
Martial BE  211a862d54  🎨 Support qwen-vl-plus  2023-12-29 10:59:26 +08:00
Martial BE  c4c89e8e1b  🎨 change gemini safety settings  2023-12-28 16:49:31 +08:00
Martial BE  72983ac734  Model list return by group  2023-12-28 15:48:20 +08:00
Martial BE  4d43dce64b  🐛 fix base 64 encoded format support of gemini-pro-vision  2023-12-28 12:23:39 +08:00
Martial BE  0fa94d3c94  🐛 fix log channel is null  2023-12-28 11:27:52 +08:00
32 changed files with 539 additions and 660 deletions

View File

@@ -189,6 +189,7 @@ const (
ChannelTypeTencent = 23
ChannelTypeAzureSpeech = 24
ChannelTypeGemini = 25
ChannelTypeBaichuan = 26
)
var ChannelBaseURLs = []string{
@@ -218,6 +219,7 @@ var ChannelBaseURLs = []string{
"https://hunyuan.cloud.tencent.com", //23
"", //24
"", //25
"https://api.baichuan-ai.com", //26
}
const (

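These two hunks register Baichuan as channel type 26 and append its default endpoint; the provider layer resolves a channel's base URL by indexing ChannelBaseURLs with the channel-type constant. A minimal sketch of that lookup (a map stands in for the real slice, earlier entries elided):

```go
package main

import "fmt"

const ChannelTypeBaichuan = 26

// Stand-in for ChannelBaseURLs; the real table is a slice whose index
// is the channel-type constant.
var channelBaseURLs = map[int]string{
	ChannelTypeBaichuan: "https://api.baichuan-ai.com",
}

func main() {
	// Resolve the default endpoint for a channel type.
	fmt.Println(channelBaseURLs[ChannelTypeBaichuan]) // https://api.baichuan-ai.com
}
```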
View File

@@ -3,6 +3,7 @@ package image
import (
"bytes"
"encoding/base64"
"errors"
"image"
_ "image/gif"
_ "image/jpeg"
@@ -44,8 +45,26 @@ func GetImageSizeFromUrl(url string) (width int, height int, err error) {
}
func GetImageFromUrl(url string) (mimeType string, data string, err error) {
if strings.HasPrefix(url, "data:image/") {
dataURLPattern := regexp.MustCompile(`data:image/([^;]+);base64,(.*)`)
matches := dataURLPattern.FindStringSubmatch(url)
if len(matches) == 3 && matches[2] != "" {
mimeType = "image/" + matches[1]
data = matches[2]
return
}
err = errors.New("image base64 decode failed")
return
}
isImage, err := IsImageUrl(url)
if !isImage {
if err == nil {
err = errors.New("invalid image link")
}
return
}
resp, err := http.Get(url)

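The new branch short-circuits data URLs: instead of fetching over HTTP, it extracts the MIME subtype and the base64 payload with a regex and rejects anything that is not a well-formed `data:image/...;base64,...` string. A self-contained sketch of that parsing path, reusing the diff's regex:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var dataURLPattern = regexp.MustCompile(`data:image/([^;]+);base64,(.*)`)

// parseImageDataURL mirrors the data-URL branch added to GetImageFromUrl.
func parseImageDataURL(url string) (mimeType, data string, err error) {
	matches := dataURLPattern.FindStringSubmatch(url)
	if len(matches) == 3 && matches[2] != "" {
		return "image/" + matches[1], matches[2], nil
	}
	return "", "", errors.New("image base64 decode failed")
}

func main() {
	mt, data, err := parseImageDataURL("data:image/png;base64,iVBORw0KGgo=")
	fmt.Println(mt, len(data), err) // image/png 12 <nil>
}
```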
View File

@@ -169,3 +169,34 @@ func TestGetImageSizeFromBase64(t *testing.T) {
})
}
}
func TestGetImageFromUrl(t *testing.T) {
for i, c := range cases {
t.Run("Decode:"+strconv.Itoa(i), func(t *testing.T) {
resp, err := http.Get(c.url)
assert.NoError(t, err)
defer resp.Body.Close()
data, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
encoded := base64.StdEncoding.EncodeToString(data)
mimeType, base64Data, err := img.GetImageFromUrl(c.url)
assert.NoError(t, err)
assert.Equal(t, encoded, base64Data)
assert.Equal(t, "image/"+c.format, mimeType)
encodedBase64 := "data:image/" + c.format + ";base64," + encoded
mimeType, base64Data, err = img.GetImageFromUrl(encodedBase64)
assert.NoError(t, err)
assert.Equal(t, encoded, base64Data)
assert.Equal(t, "image/"+c.format, mimeType)
})
}
url := "https://raw.githubusercontent.com/songquanpeng/one-api/main/README.md"
_, _, err := img.GetImageFromUrl(url)
assert.Error(t, err)
encodedBase64 := "data:image/text;base64,"
_, _, err = img.GetImageFromUrl(encodedBase64)
assert.Error(t, err)
}

View File

@@ -93,6 +93,7 @@ var ModelRatio = map[string]float64{
"qwen-plus": 1.4286, // ¥0.02 / 1k tokens
"qwen-max": 1.4286, // ¥0.02 / 1k tokens
"qwen-max-longcontext": 1.4286, // ¥0.02 / 1k tokens
"qwen-vl-plus": 0.5715, // ¥0.008 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
@@ -100,6 +101,10 @@ var ModelRatio = map[string]float64{
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"Baichuan2-Turbo": 0.5715, // ¥0.008 / 1k tokens
"Baichuan2-Turbo-192k": 1.143, // ¥0.016 / 1k tokens
"Baichuan2-53B": 1.4286, // ¥0.02 / 1k tokens
"Baichuan-Text-Embedding": 0.0357, // ¥0.0005 / 1k tokens
}
func ModelRatio2JSONString() string {

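The ratios appear to follow one-api's usual convention that a ratio of 1 equals $0.002 per 1k tokens, so a CNY price converts as ratio = price ÷ 7 (CNY per USD) ÷ 0.002; e.g. Baichuan2-Turbo at ¥0.008/1k gives 0.008 / 7 / 0.002 ≈ 0.5714, matching the 0.5715 above. A quick check of that arithmetic (base price and exchange rate are inferred from the comments, not stated in the diff):

```go
package main

import "fmt"

// ratioFromCNY converts a per-1k-token CNY price into a model ratio,
// assuming a base price of $0.002 per 1k tokens and ¥7 per $1.
func ratioFromCNY(pricePerKTokens float64) float64 {
	return pricePerKTokens / 7 / 0.002
}

func main() {
	fmt.Printf("%.4f\n", ratioFromCNY(0.008)) // 0.5714 (listed as 0.5715)
	fmt.Printf("%.4f\n", ratioFromCNY(0.02))  // 1.4286 (listed as 1.4286)
}
```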
View File

@@ -190,13 +190,13 @@ func countImageTokens(url string, detail string) (_ int, err error) {
func CountTokenInput(input any, model string) int {
switch v := input.(type) {
case string:
return CountTokenInput(v, model)
return CountTokenText(v, model)
case []string:
text := ""
for _, s := range v {
text += s
}
return CountTokenInput(text, model)
return CountTokenText(text, model)
}
return 0
}

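This is a real bug fix: the old string case called CountTokenInput with the same string, so it recursed without a base case (and the []string case did the same after concatenation). Dispatching to CountTokenText terminates. The fixed dispatch in isolation, with the token counter stubbed:

```go
package main

import "fmt"

// countTokenText stands in for common.CountTokenText.
func countTokenText(text string, model string) int { return len(text) }

// countTokenInput mirrors the fixed dispatch: the string case now calls
// countTokenText instead of recursing into itself.
func countTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return countTokenText(v, model)
	case []string:
		text := ""
		for _, s := range v {
			text += s
		}
		return countTokenText(text, model)
	}
	return 0
}

func main() {
	fmt.Println(countTokenInput([]string{"ab", "cd"}, "gpt-3.5-turbo")) // 4
}
```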
View File

@@ -18,6 +18,10 @@ import (
)
func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (err error, openaiErr *types.OpenAIError) {
if channel.TestModel == "" {
return errors.New("请填写测速模型后再试"), nil
}
// Create an http.Request
req, err := http.NewRequest("POST", "/v1/chat/completions", nil)
if err != nil {
@@ -28,26 +32,7 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = req
// Create the mapping
channelTypeToModel := map[int]string{
common.ChannelTypePaLM: "PaLM-2",
common.ChannelTypeAnthropic: "claude-2",
common.ChannelTypeBaidu: "ERNIE-Bot",
common.ChannelTypeZhipu: "chatglm_lite",
common.ChannelTypeAli: "qwen-turbo",
common.ChannelType360: "360GPT_S2_V9",
common.ChannelTypeXunfei: "SparkDesk",
common.ChannelTypeTencent: "hunyuan",
common.ChannelTypeAzure: "gpt-3.5-turbo",
}
// Look up the model name from the mapping
model, ok := channelTypeToModel[channel.Type]
if !ok {
model = "gpt-3.5-turbo" // default
}
request.Model = model
request.Model = channel.TestModel
provider := providers.GetProvider(channel, c)
if provider == nil {
@@ -69,13 +54,15 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
promptTokens := common.CountTokenMessages(request.Messages, request.Model)
Usage, openAIErrorWithStatusCode := chatProvider.ChatAction(&request, true, promptTokens)
if openAIErrorWithStatusCode != nil {
return nil, &openAIErrorWithStatusCode.OpenAIError
return errors.New(openAIErrorWithStatusCode.Message), &openAIErrorWithStatusCode.OpenAIError
}
if Usage.CompletionTokens == 0 {
return fmt.Errorf("channel %s, message 补全 tokens 非预期返回 0", channel.Name), nil
}
common.SysLog(fmt.Sprintf("测试模型 %s 返回内容为:%s", channel.Name, w.Body.String()))
return nil, nil
}

View File

@@ -2,7 +2,11 @@ package controller
import (
"fmt"
"net/http"
"one-api/common"
"one-api/model"
"one-api/types"
"sort"
"github.com/gin-gonic/gin"
)
@@ -25,559 +29,38 @@ type OpenAIModelPermission struct {
}
type OpenAIModels struct {
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
OwnedBy string `json:"owned_by"`
Permission []OpenAIModelPermission `json:"permission"`
Root string `json:"root"`
Parent *string `json:"parent"`
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
OwnedBy *string `json:"owned_by"`
Permission *[]OpenAIModelPermission `json:"permission"`
Root *string `json:"root"`
Parent *string `json:"parent"`
}
var openAIModels []OpenAIModels
var openAIModelsMap map[string]OpenAIModels
func init() {
var permission []OpenAIModelPermission
permission = append(permission, OpenAIModelPermission{
Id: "modelperm-LwHkVFn8AcMItP432fKKDIKJ",
Object: "model_permission",
Created: 1626777600,
AllowCreateEngine: true,
AllowSampling: true,
AllowLogprobs: true,
AllowSearchIndices: false,
AllowView: true,
AllowFineTuning: false,
Organization: "*",
Group: nil,
IsBlocking: false,
})
// https://platform.openai.com/docs/models/model-endpoint-compatibility
openAIModels = []OpenAIModels{
{
Id: "dall-e-2",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-2",
Parent: nil,
},
{
Id: "dall-e-3",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-3",
Parent: nil,
},
{
Id: "whisper-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "whisper-1",
Parent: nil,
},
{
Id: "tts-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1",
Parent: nil,
},
{
Id: "tts-1-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-1106",
Parent: nil,
},
{
Id: "tts-1-hd",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd",
Parent: nil,
},
{
Id: "tts-1-hd-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0301",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0301",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-1106",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-instruct",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-instruct",
Parent: nil,
},
{
Id: "gpt-4",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4",
Parent: nil,
},
{
Id: "gpt-4-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0314",
Parent: nil,
},
{
Id: "gpt-4-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0613",
Parent: nil,
},
{
Id: "gpt-4-32k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k",
Parent: nil,
},
{
Id: "gpt-4-32k-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0314",
Parent: nil,
},
{
Id: "gpt-4-32k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0613",
Parent: nil,
},
{
Id: "gpt-4-1106-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-1106-preview",
Parent: nil,
},
{
Id: "gpt-4-vision-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-vision-preview",
Parent: nil,
},
{
Id: "text-embedding-ada-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-embedding-ada-002",
Parent: nil,
},
{
Id: "text-davinci-003",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-003",
Parent: nil,
},
{
Id: "text-davinci-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-002",
Parent: nil,
},
{
Id: "text-curie-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-curie-001",
Parent: nil,
},
{
Id: "text-babbage-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-babbage-001",
Parent: nil,
},
{
Id: "text-ada-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-ada-001",
Parent: nil,
},
{
Id: "text-moderation-latest",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-latest",
Parent: nil,
},
{
Id: "text-moderation-stable",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-stable",
Parent: nil,
},
{
Id: "text-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-edit-001",
Parent: nil,
},
{
Id: "code-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "code-davinci-edit-001",
Parent: nil,
},
{
Id: "claude-instant-1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-instant-1",
Parent: nil,
},
{
Id: "claude-2",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2",
Parent: nil,
},
{
Id: "claude-2.1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.1",
Parent: nil,
},
{
Id: "claude-2.0",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.0",
Parent: nil,
},
{
Id: "ERNIE-Bot",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot",
Parent: nil,
},
{
Id: "ERNIE-Bot-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-turbo",
Parent: nil,
},
{
Id: "ERNIE-Bot-4",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-4",
Parent: nil,
},
{
Id: "Embedding-V1",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "Embedding-V1",
Parent: nil,
},
{
Id: "PaLM-2",
Object: "model",
Created: 1677649963,
OwnedBy: "google",
Permission: permission,
Root: "PaLM-2",
Parent: nil,
},
{
Id: "gemini-pro",
Object: "model",
Created: 1677649963,
OwnedBy: "google",
Permission: permission,
Root: "gemini-pro",
Parent: nil,
},
{
Id: "gemini-pro-vision",
Object: "model",
Created: 1677649963,
OwnedBy: "google",
Permission: permission,
Root: "gemini-pro-vision",
Parent: nil,
},
{
Id: "chatglm_turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_turbo",
Parent: nil,
},
{
Id: "chatglm_pro",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_pro",
Parent: nil,
},
{
Id: "chatglm_std",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_std",
Parent: nil,
},
{
Id: "chatglm_lite",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_lite",
Parent: nil,
},
{
Id: "qwen-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-turbo",
Parent: nil,
},
{
Id: "qwen-plus",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-plus",
Parent: nil,
},
{
Id: "qwen-max",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max",
Parent: nil,
},
{
Id: "qwen-max-longcontext",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max-longcontext",
Parent: nil,
},
{
Id: "text-embedding-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "text-embedding-v1",
Parent: nil,
},
{
Id: "SparkDesk",
Object: "model",
Created: 1677649963,
OwnedBy: "xunfei",
Permission: permission,
Root: "SparkDesk",
Parent: nil,
},
{
Id: "360GPT_S2_V9",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "360GPT_S2_V9",
Parent: nil,
},
{
Id: "embedding-bert-512-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding-bert-512-v1",
Parent: nil,
},
{
Id: "embedding_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding_s1_v1",
Parent: nil,
},
{
Id: "semantic_similarity_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "semantic_similarity_s1_v1",
Parent: nil,
},
{
Id: "hunyuan",
Object: "model",
Created: 1677649963,
OwnedBy: "tencent",
Permission: permission,
Root: "hunyuan",
Parent: nil,
},
keys := make([]string, 0, len(common.ModelRatio))
for k := range common.ModelRatio {
keys = append(keys, k)
}
sort.Strings(keys)
for _, modelId := range keys {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelId,
Object: "model",
Created: 1677649963,
OwnedBy: nil,
Permission: nil,
Root: nil,
Parent: nil,
})
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {
openAIModelsMap[model.Id] = model
@@ -585,6 +68,35 @@ func init() {
}
func ListModels(c *gin.Context) {
groupName := c.GetString("group")
models, err := model.CacheGetGroupModels(groupName)
if err != nil {
common.AbortWithMessage(c, http.StatusServiceUnavailable, err.Error())
return
}
sort.Strings(models)
groupOpenAIModels := make([]OpenAIModels, 0, len(models))
for _, modelId := range models {
groupOpenAIModels = append(groupOpenAIModels, OpenAIModels{
Id: modelId,
Object: "model",
Created: 1677649963,
OwnedBy: nil,
Permission: nil,
Root: nil,
Parent: nil,
})
}
c.JSON(200, gin.H{
"object": "list",
"data": groupOpenAIModels,
})
}
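OwnedBy, Permission, and Root become pointer types because entries synthesized from a group's model list (or from common.ModelRatio) carry no OpenAI metadata; with a nil pointer and no omitempty tag, those fields serialize as JSON null instead of a fabricated value. A small demonstration of that marshaling behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the struct after this change.
type OpenAIModels struct {
	Id      string  `json:"id"`
	Object  string  `json:"object"`
	Created int     `json:"created"`
	OwnedBy *string `json:"owned_by"`
}

func main() {
	b, _ := json.Marshal(OpenAIModels{Id: "Baichuan2-Turbo", Object: "model", Created: 1677649963})
	fmt.Println(string(b))
	// {"id":"Baichuan2-Turbo","object":"model","created":1677649963,"owned_by":null}
}
```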
func ListModelsForAdmin(c *gin.Context) {
c.JSON(200, gin.H{
"object": "list",
"data": openAIModels,

View File

@@ -45,6 +45,8 @@ func fetchChannel(c *gin.Context, modelName string) (channel *model.Channel, pas
return
}
c.Set("channel_id", channel.Id)
return
}

View File

@@ -39,6 +39,22 @@ func GetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
return &channel, err
}
func GetGroupModels(group string) ([]string, error) {
var models []string
groupCol := "`group`"
trueVal := "1"
if common.UsingPostgreSQL {
groupCol = `"group"`
trueVal = "true"
}
err := DB.Model(&Ability{}).Where(groupCol+" = ? and enabled = ? ", group, trueVal).Distinct("model").Pluck("model", &models).Error
if err != nil {
return nil, err
}
return models, nil
}
func (channel *Channel) AddAbilities() error {
models_ := strings.Split(channel.Models, ",")
groups_ := strings.Split(channel.Group, ",")

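group is a reserved word in both MySQL and PostgreSQL, hence the dialect-dependent quoting (backticks vs. double quotes) and the differing literal for enabled. The predicate each branch produces, in isolation:

```go
package main

import "fmt"

// groupPredicate mirrors the dialect handling in GetGroupModels: the
// reserved word "group" must be quoted per dialect, and enabled is
// compared against a dialect-appropriate boolean literal.
func groupPredicate(usingPostgreSQL bool) (column, trueVal string) {
	if usingPostgreSQL {
		return `"group"`, "true"
	}
	return "`group`", "1"
}

func main() {
	col, tv := groupPredicate(true)
	fmt.Printf("%s = ? and enabled = %s\n", col, tv) // "group" = ? and enabled = true
}
```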
View File

@@ -213,3 +213,22 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
idx := rand.Intn(endIdx)
return channels[idx], nil
}
func CacheGetGroupModels(group string) ([]string, error) {
if !common.MemoryCacheEnabled {
return GetGroupModels(group)
}
channelSyncLock.RLock()
defer channelSyncLock.RUnlock()
groupModels := group2model2channels[group]
if groupModels == nil {
return nil, errors.New("group not found")
}
models := make([]string, 0)
for model := range groupModels {
models = append(models, model)
}
return models, nil
}

View File

@@ -26,6 +26,7 @@ type Channel struct {
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
Proxy string `json:"proxy" gorm:"type:varchar(255);default:''"`
TestModel string `json:"test_model" gorm:"type:varchar(50);default:''"`
}
func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {

View File

@@ -2,6 +2,7 @@ package ali
import (
"fmt"
"strings"
"one-api/providers/base"
@@ -28,6 +29,16 @@ type AliProvider struct {
base.BaseProvider
}
func (p *AliProvider) GetFullRequestURL(requestURL string, modelName string) string {
baseURL := strings.TrimSuffix(p.GetBaseURL(), "/")
if modelName == "qwen-vl-plus" {
requestURL = "/api/v1/services/aigc/multimodal-generation/generation"
}
return fmt.Sprintf("%s%s", baseURL, requestURL)
}
// Get the request headers
func (p *AliProvider) GetRequestHeaders() (headers map[string]string) {
headers = make(map[string]string)

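The override routes qwen-vl-plus to DashScope's multimodal generation endpoint while every other model keeps the URL it was given. A sketch of the resulting URL (the base URL shown is DashScope's public endpoint, assumed here rather than taken from the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// fullRequestURL mirrors AliProvider.GetFullRequestURL.
func fullRequestURL(baseURL, requestURL, modelName string) string {
	baseURL = strings.TrimSuffix(baseURL, "/")
	if modelName == "qwen-vl-plus" {
		requestURL = "/api/v1/services/aigc/multimodal-generation/generation"
	}
	return fmt.Sprintf("%s%s", baseURL, requestURL)
}

func main() {
	fmt.Println(fullRequestURL("https://dashscope.aliyuncs.com/",
		"/api/v1/services/aigc/text-generation/generation", "qwen-vl-plus"))
	// https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation
}
```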
View File

@@ -26,21 +26,12 @@ func (aliResponse *AliChatResponse) ResponseHandler(resp *http.Response) (OpenAI
return
}
choice := types.ChatCompletionChoice{
Index: 0,
Message: types.ChatCompletionMessage{
Role: "assistant",
Content: aliResponse.Output.Text,
},
FinishReason: aliResponse.Output.FinishReason,
}
OpenAIResponse = types.ChatCompletionResponse{
ID: aliResponse.RequestId,
Object: "chat.completion",
Created: common.GetTimestamp(),
Model: aliResponse.Model,
Choices: []types.ChatCompletionChoice{choice},
Choices: aliResponse.Output.ToChatCompletionChoices(),
Usage: &types.Usage{
PromptTokens: aliResponse.Usage.InputTokens,
CompletionTokens: aliResponse.Usage.OutputTokens,
@@ -58,10 +49,31 @@ func (p *AliProvider) getChatRequestBody(request *types.ChatCompletionRequest) *
messages := make([]AliMessage, 0, len(request.Messages))
for i := 0; i < len(request.Messages); i++ {
message := request.Messages[i]
messages = append(messages, AliMessage{
Content: message.StringContent(),
Role: strings.ToLower(message.Role),
})
if request.Model != "qwen-vl-plus" {
messages = append(messages, AliMessage{
Content: message.StringContent(),
Role: strings.ToLower(message.Role),
})
} else {
openaiContent := message.ParseContent()
var parts []AliMessagePart
for _, part := range openaiContent {
if part.Type == types.ContentTypeText {
parts = append(parts, AliMessagePart{
Text: part.Text,
})
} else if part.Type == types.ContentTypeImageURL {
parts = append(parts, AliMessagePart{
Image: part.ImageURL.URL,
})
}
}
messages = append(messages, AliMessage{
Content: parts,
Role: strings.ToLower(message.Role),
})
}
}
enableSearch := false
@@ -77,6 +89,7 @@ func (p *AliProvider) getChatRequestBody(request *types.ChatCompletionRequest) *
Messages: messages,
},
Parameters: AliParameters{
ResultFormat: "message",
EnableSearch: enableSearch,
IncrementalOutput: request.Stream,
},
@@ -87,6 +100,7 @@ func (p *AliProvider) getChatRequestBody(request *types.ChatCompletionRequest) *
func (p *AliProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
requestBody := p.getChatRequestBody(request)
fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
headers := p.GetRequestHeaders()
if request.Stream {
@@ -134,10 +148,15 @@ func (p *AliProvider) ChatAction(request *types.ChatCompletionRequest, isModelMa
// Convert the Alibaba Cloud response to an OpenAI response
func (p *AliProvider) streamResponseAli2OpenAI(aliResponse *AliChatResponse) *types.ChatCompletionStreamResponse {
// chatChoice := aliResponse.Output.ToChatCompletionChoices()
// jsonBody, _ := json.MarshalIndent(chatChoice, "", " ")
// fmt.Println("requestBody:", string(jsonBody))
var choice types.ChatCompletionStreamChoice
choice.Delta.Content = aliResponse.Output.Text
if aliResponse.Output.FinishReason != "null" {
finishReason := aliResponse.Output.FinishReason
choice.Index = aliResponse.Output.Choices[0].Index
choice.Delta.Content = aliResponse.Output.Choices[0].Message.StringContent()
// fmt.Println("choice.Delta.Content:", chatChoice[0].Message)
if aliResponse.Output.Choices[0].FinishReason != "null" {
finishReason := aliResponse.Output.Choices[0].FinishReason
choice.FinishReason = &finishReason
}
@@ -200,7 +219,8 @@ func (p *AliProvider) sendStreamRequest(req *http.Request, model string) (usage
stopChan <- true
}()
common.SetEventStreamHeaders(p.Context)
// lastResponseText := ""
lastResponseText := ""
index := 0
p.Context.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
@@ -216,9 +236,11 @@ func (p *AliProvider) sendStreamRequest(req *http.Request, model string) (usage
usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens
}
aliResponse.Model = model
aliResponse.Output.Choices[0].Index = index
index++
response := p.streamResponseAli2OpenAI(&aliResponse)
// response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
// lastResponseText = aliResponse.Output.Text
response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
lastResponseText = aliResponse.Output.Choices[0].Message.StringContent()
jsonResponse, err := json.Marshal(response)
if err != nil {
common.SysError("error marshalling stream response: " + err.Error())

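The re-enabled lastResponseText logic matters because, with result_format set to "message", a chunk may repeat the accumulated text rather than just the new suffix; trimming the previously sent text off each chunk recovers a proper delta. The trimming technique on its own (chunk contents are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Each chunk repeats everything sent so far; TrimPrefix yields the delta.
	chunks := []string{"Hello", "Hello, wor", "Hello, world!"}
	lastResponseText := ""
	for _, content := range chunks {
		delta := strings.TrimPrefix(content, lastResponseText)
		lastResponseText = content
		fmt.Printf("%q\n", delta) // "Hello", then ", wor", then "ld!"
	}
}
```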
View File

@@ -1,5 +1,9 @@
package ali
import (
"one-api/types"
)
type AliError struct {
Code string `json:"code"`
Message string `json:"message"`
@@ -13,10 +17,15 @@ type AliUsage struct {
}
type AliMessage struct {
Content string `json:"content"`
Content any `json:"content"`
Role string `json:"role"`
}
type AliMessagePart struct {
Text string `json:"text,omitempty"`
Image string `json:"image,omitempty"`
}
type AliInput struct {
// Prompt string `json:"prompt"`
Messages []AliMessage `json:"messages"`
@@ -28,6 +37,7 @@ type AliParameters struct {
Seed uint64 `json:"seed,omitempty"`
EnableSearch bool `json:"enable_search,omitempty"`
IncrementalOutput bool `json:"incremental_output,omitempty"`
ResultFormat string `json:"result_format,omitempty"`
}
type AliChatRequest struct {
@@ -36,9 +46,25 @@ type AliChatRequest struct {
Parameters AliParameters `json:"parameters,omitempty"`
}
type AliChoice struct {
FinishReason string `json:"finish_reason"`
Message types.ChatCompletionMessage `json:"message"`
}
type AliOutput struct {
Text string `json:"text"`
FinishReason string `json:"finish_reason"`
Choices []types.ChatCompletionChoice `json:"choices"`
}
func (o *AliOutput) ToChatCompletionChoices() []types.ChatCompletionChoice {
for i := range o.Choices {
_, ok := o.Choices[i].Message.Content.(string)
if ok {
continue
}
o.Choices[i].Message.Content = o.Choices[i].Message.ParseContent()
}
return o.Choices
}
type AliChatResponse struct {

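Loosening AliMessage.Content from string to any lets one struct carry either a plain string (text models) or a list of AliMessagePart values (qwen-vl-plus). How the two shapes serialize (URLs are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type AliMessagePart struct {
	Text  string `json:"text,omitempty"`
	Image string `json:"image,omitempty"`
}

type AliMessage struct {
	Content any    `json:"content"`
	Role    string `json:"role"`
}

func main() {
	plain := AliMessage{Role: "user", Content: "hello"}
	multi := AliMessage{Role: "user", Content: []AliMessagePart{
		{Text: "What is in this picture?"},
		{Image: "https://example.com/cat.png"}, // placeholder URL
	}}
	for _, m := range []AliMessage{plain, multi} {
		b, _ := json.Marshal(m)
		fmt.Println(string(b))
	}
}
```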
View File

@@ -0,0 +1,30 @@
package baichuan
import (
"one-api/providers/base"
"one-api/providers/openai"
"github.com/gin-gonic/gin"
)
// Provider factory
type BaichuanProviderFactory struct{}
// Create a BaichuanProvider
// https://platform.baichuan-ai.com/docs/api
func (f BaichuanProviderFactory) Create(c *gin.Context) base.ProviderInterface {
return &BaichuanProvider{
OpenAIProvider: openai.OpenAIProvider{
BaseProvider: base.BaseProvider{
BaseURL: "https://api.baichuan-ai.com",
ChatCompletions: "/v1/chat/completions",
Embeddings: "/v1/embeddings",
Context: c,
},
},
}
}
type BaichuanProvider struct {
openai.OpenAIProvider
}

providers/baichuan/chat.go (new file, 100 lines)
View File

@@ -0,0 +1,100 @@
package baichuan
import (
"net/http"
"one-api/common"
"one-api/providers/openai"
"one-api/types"
"strings"
)
func (baichuanResponse *BaichuanChatResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
if baichuanResponse.Error.Message != "" {
errWithCode = &types.OpenAIErrorWithStatusCode{
OpenAIError: baichuanResponse.Error,
StatusCode: resp.StatusCode,
}
return
}
OpenAIResponse = types.ChatCompletionResponse{
ID: baichuanResponse.ID,
Object: baichuanResponse.Object,
Created: baichuanResponse.Created,
Model: baichuanResponse.Model,
Choices: baichuanResponse.Choices,
Usage: baichuanResponse.Usage,
}
return
}
// Build the chat request body
func (p *BaichuanProvider) getChatRequestBody(request *types.ChatCompletionRequest) *BaichuanChatRequest {
messages := make([]BaichuanMessage, 0, len(request.Messages))
for i := 0; i < len(request.Messages); i++ {
message := request.Messages[i]
if message.Role == "system" || message.Role == "assistant" {
message.Role = "assistant"
} else {
message.Role = "user"
}
messages = append(messages, BaichuanMessage{
Content: message.StringContent(),
Role: strings.ToLower(message.Role),
})
}
return &BaichuanChatRequest{
Model: request.Model,
Messages: messages,
Stream: request.Stream,
Temperature: request.Temperature,
TopP: request.TopP,
TopK: request.N,
}
}
// Chat
func (p *BaichuanProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
requestBody := p.getChatRequestBody(request)
fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
headers := p.GetRequestHeaders()
if request.Stream {
headers["Accept"] = "text/event-stream"
}
client := common.NewClient()
req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
if err != nil {
return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
}
if request.Stream {
openAIProviderChatStreamResponse := &openai.OpenAIProviderChatStreamResponse{}
var textResponse string
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
if errWithCode != nil {
return
}
usage = &types.Usage{
PromptTokens: promptTokens,
CompletionTokens: common.CountTokenText(textResponse, request.Model),
TotalTokens: promptTokens + common.CountTokenText(textResponse, request.Model),
}
} else {
baichuanResponse := &BaichuanChatResponse{}
errWithCode = p.SendRequest(req, baichuanResponse, false)
if errWithCode != nil {
return
}
usage = baichuanResponse.Usage
}
return
}

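Note the role normalization in getChatRequestBody: the request builder folds system into assistant and maps every other non-assistant role to user, which suggests Baichuan's chat API only accepts those two roles. The mapping in isolation:

```go
package main

import "fmt"

// normalizeRole mirrors the role handling in getChatRequestBody.
func normalizeRole(role string) string {
	if role == "system" || role == "assistant" {
		return "assistant"
	}
	return "user"
}

func main() {
	for _, r := range []string{"system", "assistant", "user", "function"} {
		fmt.Println(r, "->", normalizeRole(r))
	}
}
```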
View File

@@ -0,0 +1,36 @@
package baichuan
import "one-api/providers/openai"
type BaichuanMessage struct {
Role string `json:"role"`
Content string `json:"content"`
}
type BaichuanKnowledgeBase struct {
Ids []string `json:"id"`
}
type BaichuanChatRequest struct {
Model string `json:"model"`
Messages []BaichuanMessage `json:"messages"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
WithSearchEnhance bool `json:"with_search_enhance,omitempty"`
KnowledgeBase BaichuanKnowledgeBase `json:"knowledge_base,omitempty"`
}
type BaichuanKnowledgeBaseResponse struct {
Cites []struct {
Title string `json:"title"`
Content string `json:"content"`
FileId string `json:"file_id"`
} `json:"cites"`
}
type BaichuanChatResponse struct {
openai.OpenAIProviderChatResponse
KnowledgeBase BaichuanKnowledgeBaseResponse `json:"knowledge_base,omitempty"`
}

View File

@@ -60,27 +60,27 @@ func (response *GeminiChatResponse) ResponseHandler(resp *http.Response) (OpenAI
}
// Setting safety to the lowest possible values since Gemini is already powerless enough
func (p *GeminiProvider) getChatRequestBody(request *types.ChatCompletionRequest) (requestBody *GeminiChatRequest) {
func (p *GeminiProvider) getChatRequestBody(request *types.ChatCompletionRequest) (requestBody *GeminiChatRequest, errWithCode *types.OpenAIErrorWithStatusCode) {
geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(request.Messages)),
//SafetySettings: []GeminiChatSafetySettings{
// {
// Category: "HARM_CATEGORY_HARASSMENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_HATE_SPEECH",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
//},
SafetySettings: []GeminiChatSafetySettings{
{
Category: "HARM_CATEGORY_HARASSMENT",
Threshold: "BLOCK_NONE",
},
{
Category: "HARM_CATEGORY_HATE_SPEECH",
Threshold: "BLOCK_NONE",
},
{
Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
Threshold: "BLOCK_NONE",
},
{
Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
Threshold: "BLOCK_NONE",
},
},
GenerationConfig: GeminiChatGenerationConfig{
Temperature: request.Temperature,
TopP: request.TopP,
@@ -118,7 +118,10 @@ func (p *GeminiProvider) getChatRequestBody(request *types.ChatCompletionRequest
if imageNum > GeminiVisionMaxImageNum {
continue
}
mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.URL)
mimeType, data, err := image.GetImageFromUrl(part.ImageURL.URL)
if err != nil {
return nil, common.ErrorWrapper(err, "image_url_invalid", http.StatusBadRequest)
}
parts = append(parts, GeminiPart{
InlineData: &GeminiInlineData{
MimeType: mimeType,
@@ -154,11 +157,14 @@ func (p *GeminiProvider) getChatRequestBody(request *types.ChatCompletionRequest
}
}
return &geminiRequest
return &geminiRequest, nil
}
func (p *GeminiProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
requestBody := p.getChatRequestBody(request)
requestBody, errWithCode := p.getChatRequestBody(request)
if errWithCode != nil {
return
}
fullRequestURL := p.GetFullRequestURL("generateContent", request.Model)
headers := p.GetRequestHeaders()
if request.Stream {

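Two behavior changes land here: the previously commented-out safety settings are now sent explicitly, relaxed to BLOCK_NONE across all four harm categories, and a failed image fetch now surfaces as a 400 (image_url_invalid) instead of being silently dropped. For reference, the JSON fragment the new settings produce:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type GeminiChatSafetySettings struct {
	Category  string `json:"category"`
	Threshold string `json:"threshold"`
}

func main() {
	settings := []GeminiChatSafetySettings{
		{Category: "HARM_CATEGORY_HARASSMENT", Threshold: "BLOCK_NONE"},
		{Category: "HARM_CATEGORY_HATE_SPEECH", Threshold: "BLOCK_NONE"},
		{Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", Threshold: "BLOCK_NONE"},
		{Category: "HARM_CATEGORY_DANGEROUS_CONTENT", Threshold: "BLOCK_NONE"},
	}
	b, _ := json.MarshalIndent(settings, "", "  ")
	fmt.Println(string(b))
}
```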
View File

@@ -108,7 +108,7 @@ func (p *OpenAIProvider) GetRequestBody(request any, isModelMapped bool) (reques
}
// 发送流式请求
func (p *OpenAIProvider) sendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) {
func (p *OpenAIProvider) SendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) {
defer req.Body.Close()
client := common.GetHttpClient(p.Channel.Proxy)

View File

@@ -46,7 +46,7 @@ func (p *OpenAIProvider) ChatAction(request *types.ChatCompletionRequest, isMode
if request.Stream {
openAIProviderChatStreamResponse := &OpenAIProviderChatStreamResponse{}
var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderChatStreamResponse)
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
if errWithCode != nil {
return
}

View File

@@ -47,7 +47,7 @@ func (p *OpenAIProvider) CompleteAction(request *types.CompletionRequest, isMode
if request.Stream {
// TODO
var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderCompletionResponse)
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderCompletionResponse)
if errWithCode != nil {
return
}

View File

@@ -10,6 +10,7 @@ import (
"one-api/providers/api2gpt"
"one-api/providers/azure"
azurespeech "one-api/providers/azureSpeech"
"one-api/providers/baichuan"
"one-api/providers/baidu"
"one-api/providers/base"
"one-api/providers/claude"
@@ -52,6 +53,7 @@ func init() {
providerFactories[common.ChannelTypeAPI2GPT] = api2gpt.Api2gptProviderFactory{}
providerFactories[common.ChannelTypeAzureSpeech] = azurespeech.AzureSpeechProviderFactory{}
providerFactories[common.ChannelTypeGemini] = gemini.GeminiProviderFactory{}
providerFactories[common.ChannelTypeBaichuan] = baichuan.BaichuanProviderFactory{}
}

View File

@@ -58,7 +58,7 @@ func (p *XunfeiProvider) getXunfeiAuthUrl(apiKey string, apiSecret string) (stri
query := p.Context.Request.URL.Query()
apiVersion := query.Get("api-version")
if apiVersion == "" {
apiVersion = p.Channel.Key
apiVersion = p.Channel.Other
}
if apiVersion == "" {
apiVersion = "v1.1"

View File

@@ -67,7 +67,7 @@ func SetApiRouter(router *gin.Engine) {
{
channelRoute.GET("/", controller.GetAllChannels)
channelRoute.GET("/search", controller.SearchChannels)
channelRoute.GET("/models", controller.ListModels)
channelRoute.GET("/models", controller.ListModelsForAdmin)
channelRoute.GET("/:id", controller.GetChannel)
channelRoute.GET("/test", controller.TestAllChannels)
channelRoute.GET("/test/:id", controller.TestChannel)

View File

@@ -11,7 +11,7 @@ func SetRelayRouter(router *gin.Engine) {
router.Use(middleware.CORS())
// https://platform.openai.com/docs/api-reference/introduction
modelsRouter := router.Group("/v1/models")
modelsRouter.Use(middleware.TokenAuth())
modelsRouter.Use(middleware.TokenAuth(), middleware.Distribute())
{
modelsRouter.GET("", controller.ListModels)
modelsRouter.GET("/:model", controller.RetrieveModel)

View File

@@ -27,11 +27,11 @@ func (m ChatCompletionMessage) StringContent() string {
if !ok {
continue
}
if contentMap["type"] == "text" {
if subStr, ok := contentMap["text"].(string); ok {
contentStr += subStr
}
if subStr, ok := contentMap["text"].(string); ok && subStr != "" {
contentStr += subStr
}
}
return contentStr
}
@@ -55,23 +55,26 @@ func (m ChatCompletionMessage) ParseContent() []ChatMessagePart {
if !ok {
continue
}
switch contentMap["type"] {
case ContentTypeText:
if subStr, ok := contentMap["text"].(string); ok {
contentList = append(contentList, ChatMessagePart{
Type: ContentTypeText,
Text: subStr,
})
}
case ContentTypeImageURL:
if subObj, ok := contentMap["image_url"].(map[string]any); ok {
contentList = append(contentList, ChatMessagePart{
Type: ContentTypeImageURL,
ImageURL: &ChatMessageImageURL{
URL: subObj["url"].(string),
},
})
}
if subStr, ok := contentMap["text"].(string); ok && subStr != "" {
contentList = append(contentList, ChatMessagePart{
Type: ContentTypeText,
Text: subStr,
})
} else if subObj, ok := contentMap["image_url"].(map[string]any); ok {
contentList = append(contentList, ChatMessagePart{
Type: ContentTypeImageURL,
ImageURL: &ChatMessageImageURL{
URL: subObj["url"].(string),
},
})
} else if subObj, ok := contentMap["image"].(string); ok {
contentList = append(contentList, ChatMessagePart{
Type: ContentTypeImageURL,
ImageURL: &ChatMessageImageURL{
URL: subObj,
},
})
}
}
return contentList

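ParseContent now treats any part with a non-empty text string as text, keeps the OpenAI image_url shape, and additionally accepts the Ali-style bare image string, normalizing it into an image_url part. A condensed sketch of the new branching (normalizePart is a hypothetical helper, not a function in the diff):

```go
package main

import "fmt"

// normalizePart condenses the new ParseContent branches into one helper.
func normalizePart(contentMap map[string]any) (kind, value string, ok bool) {
	if s, ok := contentMap["text"].(string); ok && s != "" {
		return "text", s, true
	}
	if obj, ok := contentMap["image_url"].(map[string]any); ok {
		if u, ok := obj["url"].(string); ok {
			return "image_url", u, true
		}
	}
	if s, ok := contentMap["image"].(string); ok {
		return "image_url", s, true // Ali-style bare image field
	}
	return "", "", false
}

func main() {
	fmt.Println(normalizePart(map[string]any{"image": "https://example.com/a.png"}))
	// image_url https://example.com/a.png true
}
```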
View File

@@ -65,6 +65,12 @@ export const CHANNEL_OPTIONS = {
value: 23,
color: 'default'
},
26: {
key: 26,
text: '百川',
value: 26,
color: 'orange'
},
24: {
key: 24,
text: 'Azure Speech',

View File

@@ -36,6 +36,7 @@ const validationSchema = Yup.object().shape({
key: Yup.string().when('is_edit', { is: false, then: Yup.string().required('密钥 不能为空') }),
other: Yup.string(),
proxy: Yup.string(),
test_model: Yup.string(),
models: Yup.array().min(1, '模型 不能为空'),
groups: Yup.array().min(1, '用户组 不能为空'),
base_url: Yup.string().when('type', {
@@ -90,7 +91,7 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
if (newInput) {
Object.keys(newInput).forEach((key) => {
if (
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined) ||
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined && values[key] !== '') ||
(Array.isArray(values[key]) && values[key].length > 0)
) {
return;
@@ -464,6 +465,29 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
<FormHelperText id="helper-tex-channel-proxy-label"> {inputPrompt.proxy} </FormHelperText>
)}
</FormControl>
{inputPrompt.test_model && (
<FormControl fullWidth error={Boolean(touched.test_model && errors.test_model)} sx={{ ...theme.typography.otherInput }}>
<InputLabel htmlFor="channel-test_model-label">{inputLabel.test_model}</InputLabel>
<OutlinedInput
id="channel-test_model-label"
label={inputLabel.test_model}
type="text"
value={values.test_model}
name="test_model"
onBlur={handleBlur}
onChange={handleChange}
inputProps={{}}
aria-describedby="helper-text-channel-test_model-label"
/>
{touched.test_model && errors.test_model ? (
<FormHelperText error id="helper-tex-channel-test_model-label">
{errors.test_model}
</FormHelperText>
) : (
<FormHelperText id="helper-tex-channel-test_model-label"> {inputPrompt.test_model} </FormHelperText>
)}
</FormControl>
)}
<DialogActions>
<Button onClick={onCancel}>取消</Button>
<Button disableElevation disabled={isSubmitting} type="submit" variant="contained" color="primary">

View File

@@ -116,7 +116,7 @@ export default function ChannelPage() {
if (success) {
showSuccess('操作成功完成!');
if (action === 'delete') {
await loadChannels(0);
await handleRefresh();
}
} else {
showError(message);
@@ -127,9 +127,7 @@ export default function ChannelPage() {
// Handle refresh
const handleRefresh = async () => {
await loadChannels(0);
setActivePage(0);
setSearchKeyword('');
await loadChannels(activePage);
};
// Handle testing all enabled channels

View File

@@ -6,6 +6,7 @@ const defaultConfig = {
base_url: '',
other: '',
proxy: '',
test_model: '',
model_mapping: '',
models: [],
groups: ['default']
@@ -17,6 +18,7 @@ const defaultConfig = {
key: '密钥',
other: '其他参数',
proxy: '代理地址',
test_model: '测速模型',
models: '模型',
model_mapping: '模型映射关系',
groups: '用户组'
@@ -28,6 +30,7 @@ const defaultConfig = {
key: '请输入渠道对应的鉴权密钥',
other: '',
proxy: '单独设置代理地址支持http和socks5例如http://127.0.0.1:1080',
test_model: '用于测试使用的模型,为空时无法测速,如gpt-3.5-turbo',
models: '请选择该渠道所支持的模型',
model_mapping:
'请输入要修改的模型映射关系格式为api请求模型ID:实际转发给渠道的模型ID使用JSON数组表示例如{"gpt-3.5": "gpt-35"}',
@@ -48,17 +51,20 @@ const typeConfig = {
},
11: {
input: {
models: ['PaLM-2']
models: ['PaLM-2'],
test_model: 'PaLM-2'
}
},
14: {
input: {
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1']
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1'],
test_model: 'claude-2'
}
},
15: {
input: {
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1']
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'],
test_model: 'ERNIE-Bot'
},
prompt: {
key: '按照如下格式输入APIKey|SecretKey'
@@ -66,7 +72,8 @@ const typeConfig = {
},
16: {
input: {
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite']
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'],
test_model: 'chatglm_lite'
}
},
17: {
@@ -84,7 +91,8 @@ const typeConfig = {
'qwen-plus-internet',
'qwen-max-internet',
'qwen-max-longcontext-internet'
]
],
test_model: 'qwen-turbo'
},
prompt: {
other: '请输入插件参数,即 X-DashScope-Plugin 请求头的取值'
@@ -104,7 +112,8 @@ const typeConfig = {
},
19: {
input: {
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1']
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'],
test_model: '360GPT_S2_V9'
}
},
22: {
@@ -114,7 +123,8 @@ const typeConfig = {
},
23: {
input: {
models: ['hunyuan']
models: ['hunyuan'],
test_model: 'hunyuan'
},
prompt: {
key: '按照如下格式输入AppId|SecretId|SecretKey'
@@ -125,11 +135,26 @@ const typeConfig = {
other: '版本号'
},
input: {
models: ['gemini-pro']
models: ['gemini-pro', 'gemini-pro-vision'],
test_model: 'gemini-pro'
},
prompt: {
other: '请输入版本号例如v1'
}
},
26: {
input: {
models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan2-53B', 'Baichuan-Text-Embedding'],
test_model: 'Baichuan2-Turbo'
}
},
24: {
input: {
models: ['tts-1', 'tts-1-hd']
},
prompt: {
test_model: ''
}
}
};

View File

@@ -108,7 +108,7 @@ export default function Token() {
if (success) {
showSuccess('操作成功完成!');
if (action === 'delete') {
await loadTokens(0);
await handleRefresh();
}
} else {
showError(message);
@@ -119,9 +119,7 @@ export default function Token() {
// Handle refresh
const handleRefresh = async () => {
await loadTokens(0);
setActivePage(0);
setSearchKeyword('');
await loadTokens(activePage);
};
const handleOpenModal = (tokenId) => {

View File

@@ -109,9 +109,7 @@ export default function Users() {
// Handle refresh
const handleRefresh = async () => {
await loadUsers(0);
setActivePage(0);
setSearchKeyword('');
await loadUsers(activePage);
};
const handleOpenModal = (userId) => {