mirror of https://github.com/songquanpeng/one-api.git
synced 2026-01-13 18:35:59 +08:00
Compare commits
32 commits: 67e05ef74d ... v0.6.10-al
| Author | SHA1 | Date |
|---|---|---|
| | 6eb0770a89 | |
| | 92cd46d64f | |
| | 2b2dc2c733 | |
| | a3d7df7f89 | |
| | c368232f50 | |
| | cbfc983dc3 | |
| | 8ec092ba44 | |
| | b0b88a79ff | |
| | 7e51b04221 | |
| | f75a17f8eb | |
| | 6f13a3bb3c | |
| | f092eed1db | |
| | 629378691b | |
| | 3716e1b0e6 | |
| | a4d6e7a886 | |
| | cb772e5d06 | |
| | e32cb0b844 | |
| | fdd7bf41c0 | |
| | 29389ed44f | |
| | 88acc5a614 | |
| | a21681096a | |
| | 32f90a79a8 | |
| | 99c8c77504 | |
| | 649ecbf29c | |
| | 3a27c90910 | |
| | cba82404ae | |
| | c9ac670ba1 | |
| | 15f815c23c | |
| | 89b63ca96f | |
| | 8cc54489b9 | |
| | 58bf60805e | |
| | 6714cf96d6 | |
@@ -90,6 +90,7 @@ _✨ Access all major large models through the standard OpenAI API format, ready to use out of the box ✨_
 + [x] [together.ai](https://www.together.ai/)
 + [x] [novita.ai](https://www.novita.ai/)
 + [x] [硅基流动 SiliconCloud](https://siliconflow.cn/siliconcloud)
++ [x] [xAI](https://x.ai/)
 2. Supports configuring mirrors and many [third-party proxy services](https://iamazing.cn/page/openai-api-third-party-services).
 3. Supports accessing multiple channels via **load balancing**.
 4. Supports **stream mode**, enabling a typewriter effect through streaming responses.
@@ -399,6 +400,7 @@ graph LR
 26. `METRIC_SUCCESS_RATE_THRESHOLD`: request success-rate threshold, defaults to `0.8`.
 27. `INITIAL_ROOT_TOKEN`: if set, a root user token with this value is created automatically on first startup.
 28. `INITIAL_ROOT_ACCESS_TOKEN`: if set, a system management token for the root user with this value is created automatically on first startup.
+29. `ENFORCE_INCLUDE_USAGE`: whether to force returning usage in stream mode; disabled by default, allowed values are `true` and `false`.

### Command-Line Arguments
 1. `--port <port_number>`: specifies the port the server listens on, defaults to `3000`.
@@ -71,8 +71,9 @@ var GitHubClientSecret = ""
 var LarkClientId = ""
 var LarkClientSecret = ""
 
-var OidcAppId = ""
-var OidcAppSecret = ""
+var OidcClientId = ""
+var OidcClientSecret = ""
 var OidcWellKnown = ""
 var OidcAuthorizationEndpoint = ""
 var OidcTokenEndpoint = ""
 var OidcUserinfoEndpoint = ""

@@ -159,3 +160,5 @@ var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false)
 var RelayProxy = env.String("RELAY_PROXY", "")
 var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "")
 var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)
+
+var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false)
@@ -20,4 +20,5 @@ const (
 	BaseURL         = "base_url"
 	AvailableModels = "available_models"
 	KeyRequestBody  = "key_request_body"
+	SystemPrompt    = "system_prompt"
 )
@@ -31,15 +31,15 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
 	contentType := c.Request.Header.Get("Content-Type")
 	if strings.HasPrefix(contentType, "application/json") {
 		err = json.Unmarshal(requestBody, &v)
 	} else {
-		// skip for now
-		// TODO: someday non json request have variant model, we will need to implementation this
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
+		err = c.ShouldBind(&v)
 	}
 	if err != nil {
 		return err
 	}
 	// Reset request body
 	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
 	return nil
 }
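As an aside, the pattern this hunk relies on is buffering the request body once and handing out fresh `io.NopCloser` readers so it can be consumed more than once. A minimal, gin-independent sketch (names here are illustrative, not from the repo):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readTwice buffers a body once, then wraps the bytes in NopClosers so two
// independent readers both see the full payload.
func readTwice(body io.ReadCloser) (first, second string, err error) {
	raw, err := io.ReadAll(body)
	if err != nil {
		return "", "", err
	}
	r1 := io.NopCloser(bytes.NewBuffer(raw))
	r2 := io.NopCloser(bytes.NewBuffer(raw))
	b1, _ := io.ReadAll(r1)
	b2, _ := io.ReadAll(r2)
	return string(b1), string(b2), nil
}

func main() {
	a, b, _ := readTwice(io.NopCloser(strings.NewReader(`{"model":"gpt-4o"}`)))
	fmt.Println(a == b) // true: both readers saw the same full body
}
```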
@@ -137,3 +137,23 @@ func String2Int(str string) int {
 	}
 	return num
 }
+
+func Float64PtrMax(p *float64, maxValue float64) *float64 {
+	if p == nil {
+		return nil
+	}
+	if *p > maxValue {
+		return &maxValue
+	}
+	return p
+}
+
+func Float64PtrMin(p *float64, minValue float64) *float64 {
+	if p == nil {
+		return nil
+	}
+	if *p < minValue {
+		return &minValue
+	}
+	return p
+}
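The point of these pointer-aware clamps is that `nil` means "the user never set this parameter", so it must pass through untouched instead of being clamped to a bound. A small usage sketch (copying the helper locally for self-containment):

```go
package main

import "fmt"

// Same shape as helper.Float64PtrMax: clamp only when a value was supplied.
func float64PtrMax(p *float64, maxValue float64) *float64 {
	if p == nil {
		return nil
	}
	if *p > maxValue {
		return &maxValue
	}
	return p
}

func main() {
	topP := 1.5
	fmt.Println(*float64PtrMax(&topP, 0.9999))     // 0.9999: out-of-range value clamped
	fmt.Println(float64PtrMax(nil, 0.9999) == nil) // true: unset stays unset
}
```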
@@ -38,8 +38,8 @@ func getOidcUserInfoByCode(code string) (*OidcUser, error) {
 		return nil, errors.New("无效的参数")
 	}
 	values := map[string]string{
-		"client_id":     config.OidcAppId,
-		"client_secret": config.OidcAppSecret,
+		"client_id":     config.OidcClientId,
+		"client_secret": config.OidcClientSecret,
 		"code":          code,
 		"grant_type":    "authorization_code",
 		"redirect_uri":  fmt.Sprintf("%s/oauth/oidc", config.ServerAddress),
@@ -17,9 +17,11 @@ func GetSubscription(c *gin.Context) {
 	if config.DisplayTokenStatEnabled {
 		tokenId := c.GetInt(ctxkey.TokenId)
 		token, err = model.GetTokenById(tokenId)
-		expiredTime = token.ExpiredTime
-		remainQuota = token.RemainQuota
-		usedQuota = token.UsedQuota
+		if err == nil {
+			expiredTime = token.ExpiredTime
+			remainQuota = token.RemainQuota
+			usedQuota = token.UsedQuota
+		}
 	} else {
 		userId := c.GetInt(ctxkey.Id)
 		remainQuota, err = model.GetUserQuota(userId)
@@ -81,6 +81,26 @@ type APGC2DGPTUsageResponse struct {
 	TotalUsed float64 `json:"total_used"`
 }
 
+type SiliconFlowUsageResponse struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Status  bool   `json:"status"`
+	Data    struct {
+		ID            string `json:"id"`
+		Name          string `json:"name"`
+		Image         string `json:"image"`
+		Email         string `json:"email"`
+		IsAdmin       bool   `json:"isAdmin"`
+		Balance       string `json:"balance"`
+		Status        string `json:"status"`
+		Introduction  string `json:"introduction"`
+		Role          string `json:"role"`
+		ChargeBalance string `json:"chargeBalance"`
+		TotalBalance  string `json:"totalBalance"`
+		Category      string `json:"category"`
+	} `json:"data"`
+}
+
 // GetAuthHeader get auth header
 func GetAuthHeader(token string) http.Header {
 	h := http.Header{}

@@ -203,6 +223,28 @@ func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
 	return response.TotalAvailable, nil
 }
 
+func updateChannelSiliconFlowBalance(channel *model.Channel) (float64, error) {
+	url := "https://api.siliconflow.cn/v1/user/info"
+	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+	if err != nil {
+		return 0, err
+	}
+	response := SiliconFlowUsageResponse{}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 20000 {
+		return 0, fmt.Errorf("code: %d, message: %s", response.Code, response.Message)
+	}
+	balance, err := strconv.ParseFloat(response.Data.Balance, 64)
+	if err != nil {
+		return 0, err
+	}
+	channel.UpdateBalance(balance)
+	return balance, nil
+}
+
 func updateChannelBalance(channel *model.Channel) (float64, error) {
 	baseURL := channeltype.ChannelBaseURLs[channel.Type]
 	if channel.GetBaseURL() == "" {

@@ -227,6 +269,8 @@ func updateChannelBalance(channel *model.Channel) (float64, error) {
 		return updateChannelAPI2GPTBalance(channel)
 	case channeltype.AIGC2D:
 		return updateChannelAIGC2DBalance(channel)
+	case channeltype.SiliconFlow:
+		return updateChannelSiliconFlowBalance(channel)
 	default:
 		return 0, errors.New("尚未实现")
 	}
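Note that SiliconFlow encodes balances as JSON strings rather than numbers, which is why the new function goes through `strconv.ParseFloat`. A minimal sketch of the decode step with a canned payload (the field subset below is an assumption; the real call is a GET to the user-info endpoint with a Bearer key):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	payload := []byte(`{"code":20000,"data":{"balance":"12.34"}}`)
	var resp struct {
		Code int `json:"code"`
		Data struct {
			Balance string `json:"balance"` // balance arrives as a string
		} `json:"data"`
	}
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	balance, err := strconv.ParseFloat(resp.Data.Balance, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(balance) // 12.34
}
```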
@@ -76,9 +76,9 @@ func testChannel(channel *model.Channel, request *relaymodel.GeneralOpenAIRequest) error {
 		if len(modelNames) > 0 {
 			modelName = modelNames[0]
 		}
-		if modelMap != nil && modelMap[modelName] != "" {
-			modelName = modelMap[modelName]
-		}
 	}
+	if modelMap != nil && modelMap[modelName] != "" {
+		modelName = modelMap[modelName]
+	}
 	meta.OriginModelName, meta.ActualModelName = request.Model, modelName
 	request.Model = modelName
@@ -37,7 +37,8 @@ func GetStatus(c *gin.Context) {
 		"quota_per_unit":              config.QuotaPerUnit,
 		"display_in_currency":         config.DisplayInCurrencyEnabled,
 		"oidc":                        config.OidcEnabled,
-		"oidc_app_id":                 config.OidcAppId,
+		"oidc_client_id":              config.OidcClientId,
 		"oidc_well_known":             config.OidcWellKnown,
 		"oidc_authorization_endpoint": config.OidcAuthorizationEndpoint,
 		"oidc_token_endpoint":         config.OidcTokenEndpoint,
 		"oidc_userinfo_endpoint":      config.OidcUserinfoEndpoint,
@@ -12,7 +12,7 @@ import (
 )
 
 type ModelRequest struct {
-	Model string `json:"model"`
+	Model string `json:"model" form:"model"`
 }
 
 func Distribute() func(c *gin.Context) {

@@ -61,6 +61,9 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
 	c.Set(ctxkey.Channel, channel.Type)
 	c.Set(ctxkey.ChannelId, channel.Id)
 	c.Set(ctxkey.ChannelName, channel.Name)
+	if channel.SystemPrompt != nil && *channel.SystemPrompt != "" {
+		c.Set(ctxkey.SystemPrompt, *channel.SystemPrompt)
+	}
 	c.Set(ctxkey.ModelMapping, channel.GetModelMapping())
 	c.Set(ctxkey.OriginalModel, modelName) // for retry
 	c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
@@ -37,6 +37,7 @@ type Channel struct {
 	ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
 	Priority     *int64  `json:"priority" gorm:"bigint;default:0"`
 	Config       string  `json:"config"`
+	SystemPrompt *string `json:"system_prompt" gorm:"type:text"`
 }
 
 type ChannelConfig struct {
13 model/log.go

@@ -3,6 +3,7 @@ package model
 import (
 	"context"
+	"fmt"
 
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"

@@ -152,7 +153,11 @@ func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
 }
 
 func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int64) {
-	tx := LOG_DB.Table("logs").Select("ifnull(sum(quota),0)")
+	ifnull := "ifnull"
+	if common.UsingPostgreSQL {
+		ifnull = "COALESCE"
+	}
+	tx := LOG_DB.Table("logs").Select(fmt.Sprintf("%s(sum(quota),0)", ifnull))
 	if username != "" {
 		tx = tx.Where("username = ?", username)
 	}

@@ -176,7 +181,11 @@ func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) {
-	tx := LOG_DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)")
+	ifnull := "ifnull"
+	if common.UsingPostgreSQL {
+		ifnull = "COALESCE"
+	}
+	tx := LOG_DB.Table("logs").Select(fmt.Sprintf("%s(sum(prompt_tokens),0) + %s(sum(completion_tokens),0)", ifnull, ifnull))
 	if username != "" {
 		tx = tx.Where("username = ?", username)
 	}
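The motivation here is a SQL dialect gap: MySQL and SQLite accept `IFNULL(x, 0)`, but PostgreSQL only provides `COALESCE`. The diff picks the function name once and splices it into the select expression. A standalone sketch of that choice (`usingPostgres` stands in for `common.UsingPostgreSQL`):

```go
package main

import "fmt"

// sumQuotaSelect builds the aggregate expression with the dialect's
// null-coalescing function: IFNULL for MySQL/SQLite, COALESCE for Postgres.
func sumQuotaSelect(usingPostgres bool) string {
	ifnull := "ifnull"
	if usingPostgres {
		ifnull = "COALESCE"
	}
	return fmt.Sprintf("%s(sum(quota),0)", ifnull)
}

func main() {
	fmt.Println(sumQuotaSelect(false)) // ifnull(sum(quota),0)
	fmt.Println(sumQuotaSelect(true))  // COALESCE(sum(quota),0)
}
```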
@@ -179,10 +179,12 @@ func updateOptionMap(key string, value string) (err error) {
 		config.LarkClientId = value
 	case "LarkClientSecret":
 		config.LarkClientSecret = value
-	case "OidcAppId":
-		config.OidcAppId = value
-	case "OidcAppSecret":
-		config.OidcAppSecret = value
+	case "OidcClientId":
+		config.OidcClientId = value
+	case "OidcClientSecret":
+		config.OidcClientSecret = value
 	case "OidcWellKnown":
 		config.OidcWellKnown = value
 	case "OidcAuthorizationEndpoint":
 		config.OidcAuthorizationEndpoint = value
 	case "OidcTokenEndpoint":
@@ -30,7 +30,7 @@ type Token struct {
 	RemainQuota    int64   `json:"remain_quota" gorm:"bigint;default:0"`
 	UnlimitedQuota bool    `json:"unlimited_quota" gorm:"default:false"`
 	UsedQuota      int64   `json:"used_quota" gorm:"bigint;default:0"` // used quota
-	Models         *string `json:"models" gorm:"default:''"`           // allowed models
+	Models         *string `json:"models" gorm:"type:text"`            // allowed models
 	Subnet         *string `json:"subnet" gorm:"default:''"`           // allowed subnet
 }
@@ -121,30 +121,40 @@ func GetTokenById(id int) (*Token, error) {
 	return &token, err
 }
 
-func (token *Token) Insert() error {
+func (t *Token) Insert() error {
 	var err error
-	err = DB.Create(token).Error
+	err = DB.Create(t).Error
 	return err
 }
 
 // Update Make sure your token's fields is completed, because this will update non-zero values
-func (token *Token) Update() error {
+func (t *Token) Update() error {
 	var err error
-	err = DB.Model(token).Select("name", "status", "expired_time", "remain_quota", "unlimited_quota", "models", "subnet").Updates(token).Error
+	err = DB.Model(t).Select("name", "status", "expired_time", "remain_quota", "unlimited_quota", "models", "subnet").Updates(t).Error
 	return err
 }
 
-func (token *Token) SelectUpdate() error {
+func (t *Token) SelectUpdate() error {
 	// This can update zero values
-	return DB.Model(token).Select("accessed_time", "status").Updates(token).Error
+	return DB.Model(t).Select("accessed_time", "status").Updates(t).Error
 }
 
-func (token *Token) Delete() error {
+func (t *Token) Delete() error {
 	var err error
-	err = DB.Delete(token).Error
+	err = DB.Delete(t).Error
 	return err
 }
 
+func (t *Token) GetModels() string {
+	if t == nil {
+		return ""
+	}
+	if t.Models == nil {
+		return ""
+	}
+	return *t.Models
+}
+
 func DeleteTokenById(id int, userId int) (err error) {
 	// Why we need userId here? In case user want to delete other's token.
 	if id == 0 || userId == 0 {

@@ -254,14 +264,14 @@ func PreConsumeTokenQuota(tokenId int, quota int64) (err error) {
 
 func PostConsumeTokenQuota(tokenId int, quota int64) (err error) {
 	token, err := GetTokenById(tokenId)
 	if err != nil {
 		return err
 	}
 	if quota > 0 {
 		err = DecreaseUserQuota(token.UserId, quota)
 	} else {
 		err = IncreaseUserQuota(token.UserId, -quota)
 	}
 	if err != nil {
 		return err
 	}
 	if !token.UnlimitedQuota {
 		if quota > 0 {
 			err = DecreaseTokenQuota(tokenId, quota)
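`GetModels` is deliberately written to tolerate both a nil `*Token` receiver and a nil `Models` field, so call sites don't need two guards. In Go, calling a method on a nil pointer receiver is legal as long as the method checks before dereferencing; a self-contained sketch:

```go
package main

import "fmt"

type Token struct {
	Models *string
}

// GetModels is nil-safe on both the receiver and the field.
func (t *Token) GetModels() string {
	if t == nil {
		return ""
	}
	if t.Models == nil {
		return ""
	}
	return *t.Models
}

func main() {
	var missing *Token
	fmt.Printf("%q\n", missing.GetModels()) // "" — no panic on a nil receiver
	m := "gpt-4o,gpt-4o-mini"
	fmt.Printf("%q\n", (&Token{Models: &m}).GetModels())
}
```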
@@ -1,10 +1,11 @@
 package monitor
 
 import (
-	"github.com/songquanpeng/one-api/common/config"
-	"github.com/songquanpeng/one-api/relay/model"
 	"net/http"
 	"strings"
+
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/relay/model"
 )
 
 func ShouldDisableChannel(err *model.Error, statusCode int) bool {

@@ -18,31 +19,23 @@ func ShouldDisableChannel(err *model.Error, statusCode int) bool {
 		return true
 	}
 	switch err.Type {
-	case "insufficient_quota":
-		return true
-	// https://docs.anthropic.com/claude/reference/errors
-	case "authentication_error":
-		return true
-	case "permission_error":
-		return true
-	case "forbidden":
+	// https://docs.anthropic.com/claude/reference/errors
+	case "insufficient_quota", "authentication_error", "permission_error", "forbidden":
 		return true
 	}
 	if err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
 		return true
 	}
-	if strings.HasPrefix(err.Message, "Your credit balance is too low") { // anthropic
-		return true
-	} else if strings.HasPrefix(err.Message, "This organization has been disabled.") {
-		return true
-	}
-	//if strings.Contains(err.Message, "quota") {
-	//	return true
-	//}
-	if strings.Contains(err.Message, "credit") {
-		return true
-	}
-	if strings.Contains(err.Message, "balance") {
-		return true
-	}
+
+	lowerMessage := strings.ToLower(err.Message)
+	if strings.Contains(lowerMessage, "your access was terminated") ||
+		strings.Contains(lowerMessage, "violation of our policies") ||
+		strings.Contains(lowerMessage, "your credit balance is too low") ||
+		strings.Contains(lowerMessage, "organization has been disabled") ||
+		strings.Contains(lowerMessage, "credit") ||
+		strings.Contains(lowerMessage, "balance") ||
+		strings.Contains(lowerMessage, "permission denied") ||
+		strings.Contains(lowerMessage, "organization has been restricted") || // groq
+		strings.Contains(lowerMessage, "已欠费") {
+		return true
+	}
 	return false
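The rewrite normalizes the upstream error message once with `strings.ToLower` and then matches against a fixed phrase list, so a new provider's wording only needs one more entry and casing differences stop mattering. A sketch of the pattern (the phrases below are a subset of the diff's list):

```go
package main

import (
	"fmt"
	"strings"
)

// disablePhrases is matched against the lowercased message, so providers'
// capitalization variants all hit the same entries.
var disablePhrases = []string{
	"your access was terminated",
	"credit",
	"balance",
	"permission denied",
}

func shouldDisable(message string) bool {
	lower := strings.ToLower(message)
	for _, phrase := range disablePhrases {
		if strings.Contains(lower, phrase) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldDisable("Your Credit Balance is too low")) // true
	fmt.Println(shouldDisable("rate limit exceeded"))            // false
}
```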
@@ -3,6 +3,7 @@ package ali
 import (
 	"bufio"
 	"encoding/json"
+	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"

@@ -35,9 +36,7 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 		enableSearch = true
 		aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
 	}
-	if request.TopP >= 1 {
-		request.TopP = 0.9999
-	}
+	request.TopP = helper.Float64PtrMax(request.TopP, 0.9999)
 	return &ChatRequest{
 		Model: aliModel,
 		Input: Input{

@@ -59,7 +58,7 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 
 func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
 	return &EmbeddingRequest{
-		Model: "text-embedding-v1",
+		Model: request.Model,
 		Input: struct {
 			Texts []string `json:"texts"`
 		}{

@@ -102,8 +101,9 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 			StatusCode: resp.StatusCode,
 		}, nil
 	}
+	requestModel := c.GetString(ctxkey.RequestModel)
 	fullTextResponse := embeddingResponseAli2OpenAI(&aliResponse)
+	fullTextResponse.Model = requestModel
 	jsonResponse, err := json.Marshal(fullTextResponse)
 	if err != nil {
 		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
@@ -16,13 +16,13 @@ type Input struct {
 }
 
 type Parameters struct {
-	TopP              float64      `json:"top_p,omitempty"`
+	TopP              *float64     `json:"top_p,omitempty"`
 	TopK              int          `json:"top_k,omitempty"`
 	Seed              uint64       `json:"seed,omitempty"`
 	EnableSearch      bool         `json:"enable_search,omitempty"`
 	IncrementalOutput bool         `json:"incremental_output,omitempty"`
 	MaxTokens         int          `json:"max_tokens,omitempty"`
-	Temperature       float64      `json:"temperature,omitempty"`
+	Temperature       *float64     `json:"temperature,omitempty"`
 	ResultFormat      string       `json:"result_format,omitempty"`
 	Tools             []model.Tool `json:"tools,omitempty"`
 }
@@ -3,7 +3,11 @@ package anthropic
 
 var ModelList = []string{
 	"claude-instant-1.2", "claude-2.0", "claude-2.1",
 	"claude-3-haiku-20240307",
+	"claude-3-5-haiku-20241022",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
 	"claude-3-5-sonnet-20240620",
+	"claude-3-5-sonnet-20241022",
+	"claude-3-5-sonnet-latest",
 }
@@ -48,8 +48,8 @@ type Request struct {
 	MaxTokens     int      `json:"max_tokens,omitempty"`
 	StopSequences []string `json:"stop_sequences,omitempty"`
 	Stream        bool     `json:"stream,omitempty"`
-	Temperature   float64  `json:"temperature,omitempty"`
-	TopP          float64  `json:"top_p,omitempty"`
+	Temperature   *float64 `json:"temperature,omitempty"`
+	TopP          *float64 `json:"top_p,omitempty"`
 	TopK          int      `json:"top_k,omitempty"`
 	Tools         []Tool   `json:"tools,omitempty"`
 	ToolChoice    any      `json:"tool_choice,omitempty"`
@@ -29,10 +29,13 @@ var AwsModelIDMap = map[string]string{
 	"claude-instant-1.2":         "anthropic.claude-instant-v1",
 	"claude-2.0":                 "anthropic.claude-v2",
 	"claude-2.1":                 "anthropic.claude-v2:1",
 	"claude-3-haiku-20240307":    "anthropic.claude-3-haiku-20240307-v1:0",
-	"claude-3-sonnet-20240229":   "anthropic.claude-3-sonnet-20240229-v1:0",
-	"claude-3-5-sonnet-20240620": "anthropic.claude-3-5-sonnet-20240620-v1:0",
-	"claude-3-opus-20240229":     "anthropic.claude-3-opus-20240229-v1:0",
+	"claude-3-sonnet-20240229":   "anthropic.claude-3-sonnet-20240229-v1:0",
+	"claude-3-opus-20240229":     "anthropic.claude-3-opus-20240229-v1:0",
+	"claude-3-5-sonnet-20240620": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+	"claude-3-5-sonnet-20241022": "anthropic.claude-3-5-sonnet-20241022-v2:0",
+	"claude-3-5-sonnet-latest":   "anthropic.claude-3-5-sonnet-20241022-v2:0",
+	"claude-3-5-haiku-20241022":  "anthropic.claude-3-5-haiku-20241022-v1:0",
 }
 
 func awsModelID(requestModel string) (string, error) {
@@ -11,8 +11,8 @@ type Request struct {
 	Messages      []anthropic.Message `json:"messages"`
 	System        string              `json:"system,omitempty"`
 	MaxTokens     int                 `json:"max_tokens,omitempty"`
-	Temperature   float64             `json:"temperature,omitempty"`
-	TopP          float64             `json:"top_p,omitempty"`
+	Temperature   *float64            `json:"temperature,omitempty"`
+	TopP          *float64            `json:"top_p,omitempty"`
 	TopK          int                 `json:"top_k,omitempty"`
 	StopSequences []string            `json:"stop_sequences,omitempty"`
 	Tools         []anthropic.Tool    `json:"tools,omitempty"`
@@ -4,10 +4,10 @@ package aws
 //
 // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
 type Request struct {
-	Prompt      string  `json:"prompt"`
-	MaxGenLen   int     `json:"max_gen_len,omitempty"`
-	Temperature float64 `json:"temperature,omitempty"`
-	TopP        float64 `json:"top_p,omitempty"`
+	Prompt      string   `json:"prompt"`
+	MaxGenLen   int      `json:"max_gen_len,omitempty"`
+	Temperature *float64 `json:"temperature,omitempty"`
+	TopP        *float64 `json:"top_p,omitempty"`
 }
 
 // Response is the response from AWS Llama3
@@ -35,9 +35,9 @@ type Message struct {
 
 type ChatRequest struct {
 	Messages      []Message `json:"messages"`
-	Temperature   float64   `json:"temperature,omitempty"`
-	TopP          float64   `json:"top_p,omitempty"`
-	PenaltyScore  float64   `json:"penalty_score,omitempty"`
+	Temperature   *float64  `json:"temperature,omitempty"`
+	TopP          *float64  `json:"top_p,omitempty"`
+	PenaltyScore  *float64  `json:"penalty_score,omitempty"`
 	Stream        bool      `json:"stream,omitempty"`
 	System        string    `json:"system,omitempty"`
 	DisableSearch bool      `json:"disable_search,omitempty"`
@@ -9,5 +9,5 @@ type Request struct {
 	Prompt      string   `json:"prompt,omitempty"`
 	Raw         bool     `json:"raw,omitempty"`
 	Stream      bool     `json:"stream,omitempty"`
-	Temperature float64  `json:"temperature,omitempty"`
+	Temperature *float64 `json:"temperature,omitempty"`
 }
@@ -43,7 +43,7 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 		K:                textRequest.TopK,
 		Stream:           textRequest.Stream,
 		FrequencyPenalty: textRequest.FrequencyPenalty,
-		PresencePenalty:  textRequest.FrequencyPenalty,
+		PresencePenalty:  textRequest.PresencePenalty,
 		Seed:             int(textRequest.Seed),
 	}
 	if cohereRequest.Model == "" {
@@ -10,15 +10,15 @@ type Request struct {
 	PromptTruncation string      `json:"prompt_truncation,omitempty"` // 默认值为"AUTO"
 	Connectors       []Connector `json:"connectors,omitempty"`
 	Documents        []Document  `json:"documents,omitempty"`
-	Temperature      float64     `json:"temperature,omitempty"` // 默认值为0.3
+	Temperature      *float64    `json:"temperature,omitempty"` // 默认值为0.3
 	MaxTokens        int         `json:"max_tokens,omitempty"`
 	MaxInputTokens   int         `json:"max_input_tokens,omitempty"`
 	K                int         `json:"k,omitempty"` // 默认值为0
-	P                float64     `json:"p,omitempty"` // 默认值为0.75
+	P                *float64    `json:"p,omitempty"` // 默认值为0.75
 	Seed             int         `json:"seed,omitempty"`
 	StopSequences    []string    `json:"stop_sequences,omitempty"`
-	FrequencyPenalty float64      `json:"frequency_penalty,omitempty"` // 默认值为0.0
-	PresencePenalty  float64      `json:"presence_penalty,omitempty"`  // 默认值为0.0
+	FrequencyPenalty *float64     `json:"frequency_penalty,omitempty"` // 默认值为0.0
+	PresencePenalty  *float64     `json:"presence_penalty,omitempty"`  // 默认值为0.0
 	Tools            []Tool       `json:"tools,omitempty"`
 	ToolResults      []ToolResult `json:"tool_results,omitempty"`
 }
@@ -4,11 +4,12 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
-	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"
 	"strings"
 
+	"github.com/songquanpeng/one-api/common/render"
+
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"

@@ -28,6 +29,11 @@ const (
 	VisionMaxImageNum = 16
 )
 
+var mimeTypeMap = map[string]string{
+	"json_object": "application/json",
+	"text":        "text/plain",
+}
+
 // Setting safety to the lowest possible values since Gemini is already powerless enough
 func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 	geminiRequest := ChatRequest{

@@ -56,6 +62,15 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 			MaxOutputTokens: textRequest.MaxTokens,
 		},
 	}
+	if textRequest.ResponseFormat != nil {
+		if mimeType, ok := mimeTypeMap[textRequest.ResponseFormat.Type]; ok {
+			geminiRequest.GenerationConfig.ResponseMimeType = mimeType
+		}
+		if textRequest.ResponseFormat.JsonSchema != nil {
+			geminiRequest.GenerationConfig.ResponseSchema = textRequest.ResponseFormat.JsonSchema.Schema
+			geminiRequest.GenerationConfig.ResponseMimeType = mimeTypeMap["json_object"]
+		}
+	}
 	if textRequest.Tools != nil {
 		functions := make([]model.Function, 0, len(textRequest.Tools))
 		for _, tool := range textRequest.Tools {
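The idea of this hunk is a small translation layer: OpenAI's `response_format.type` selects a Gemini `responseMimeType`, and an attached JSON schema both carries over and forces JSON output. A standalone sketch with simplified stand-in types (not the adaptor's real structs):

```go
package main

import "fmt"

// ResponseFormat is a simplified stand-in for the OpenAI-style request field.
type ResponseFormat struct {
	Type   string
	Schema map[string]any // stands in for JsonSchema.Schema
}

var mimeTypeMap = map[string]string{
	"json_object": "application/json",
	"text":        "text/plain",
}

// toGeminiConfig mirrors the diff: map the type, and let a schema imply JSON.
func toGeminiConfig(rf *ResponseFormat) (mimeType string, schema map[string]any) {
	if rf == nil {
		return "", nil
	}
	if m, ok := mimeTypeMap[rf.Type]; ok {
		mimeType = m
	}
	if rf.Schema != nil {
		schema = rf.Schema
		mimeType = mimeTypeMap["json_object"] // a schema forces JSON output
	}
	return mimeType, schema
}

func main() {
	m, s := toGeminiConfig(&ResponseFormat{Type: "json_object"})
	fmt.Println(m, s == nil) // application/json true
}
```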
@@ -65,10 +65,12 @@ type ChatTools struct {
 }
 
 type ChatGenerationConfig struct {
-	Temperature      float64  `json:"temperature,omitempty"`
-	TopP             float64  `json:"topP,omitempty"`
+	Temperature      *float64 `json:"temperature,omitempty"`
+	TopP             *float64 `json:"topP,omitempty"`
 	TopK             float64  `json:"topK,omitempty"`
 	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
 	CandidateCount   int      `json:"candidateCount,omitempty"`
 	StopSequences    []string `json:"stopSequences,omitempty"`
+	ResponseMimeType string   `json:"responseMimeType,omitempty"`
+	ResponseSchema   any      `json:"responseSchema,omitempty"`
 }
@@ -4,14 +4,24 @@ package groq
 
 var ModelList = []string{
 	"gemma-7b-it",
-	"mixtral-8x7b-32768",
-	"llama3-8b-8192",
-	"llama3-70b-8192",
 	"gemma2-9b-it",
 	"llama-3.1-405b-reasoning",
 	"llama-3.1-70b-versatile",
 	"llama-3.1-8b-instant",
+	"llama-3.2-11b-text-preview",
+	"llama-3.2-11b-vision-preview",
+	"llama-3.2-1b-preview",
+	"llama-3.2-3b-preview",
+	"llama-3.2-90b-text-preview",
+	"llama-3.2-90b-vision-preview",
+	"llama-guard-3-8b",
+	"llama3-70b-8192",
+	"llama3-8b-8192",
 	"llama3-groq-70b-8192-tool-use-preview",
 	"llama3-groq-8b-8192-tool-use-preview",
 	"llava-v1.5-7b-4096-preview",
+	"mixtral-8x7b-32768",
+	"distil-whisper-large-v3-en",
 	"whisper-large-v3",
+	"whisper-large-v3-turbo",
 }
@@ -1,14 +1,14 @@
 package ollama
 
 type Options struct {
 	Seed             int      `json:"seed,omitempty"`
-	Temperature      float64  `json:"temperature,omitempty"`
+	Temperature      *float64 `json:"temperature,omitempty"`
 	TopK             int      `json:"top_k,omitempty"`
-	TopP             float64  `json:"top_p,omitempty"`
-	FrequencyPenalty float64  `json:"frequency_penalty,omitempty"`
-	PresencePenalty  float64  `json:"presence_penalty,omitempty"`
+	TopP             *float64 `json:"top_p,omitempty"`
+	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
+	PresencePenalty  *float64 `json:"presence_penalty,omitempty"`
 	NumPredict       int      `json:"num_predict,omitempty"`
 	NumCtx           int      `json:"num_ctx,omitempty"`
 }
 
 type Message struct {
@@ -75,6 +75,13 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
+	if request.Stream {
+		// always return usage in stream mode
+		if request.StreamOptions == nil {
+			request.StreamOptions = &model.StreamOptions{}
+		}
+		request.StreamOptions.IncludeUsage = true
+	}
 	return request, nil
 }
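This hunk forces `stream_options.include_usage` on every streaming request so the upstream's final SSE chunk reports token usage (which the billing path needs). A self-contained sketch of the mutation and the resulting JSON, using simplified stand-in types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type StreamOptions struct {
	IncludeUsage bool `json:"include_usage,omitempty"`
}

type Request struct {
	Model         string         `json:"model"`
	Stream        bool           `json:"stream,omitempty"`
	StreamOptions *StreamOptions `json:"stream_options,omitempty"`
}

// forceUsage mirrors the diff: only touch streaming requests, creating the
// options struct if the client didn't send one.
func forceUsage(r *Request) {
	if !r.Stream {
		return
	}
	if r.StreamOptions == nil {
		r.StreamOptions = &StreamOptions{}
	}
	r.StreamOptions.IncludeUsage = true
}

func main() {
	r := &Request{Model: "gpt-4o-mini", Stream: true}
	forceUsage(r)
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"model":"gpt-4o-mini","stream":true,"stream_options":{"include_usage":true}}
}
```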
@@ -11,9 +11,10 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
+	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
-	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
+	"github.com/songquanpeng/one-api/relay/adaptor/xai"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )

@@ -32,6 +33,7 @@ var CompatibleChannels = []int{
 	channeltype.TogetherAI,
 	channeltype.Novita,
 	channeltype.SiliconFlow,
+	channeltype.XAI,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {

@@ -64,6 +66,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "novita", novita.ModelList
 	case channeltype.SiliconFlow:
 		return "siliconflow", siliconflow.ModelList
+	case channeltype.XAI:
+		return "xai", xai.ModelList
 	default:
 		return "openai", ModelList
 	}
@@ -8,6 +8,8 @@ var ModelList = []string{
 	"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
 	"gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
 	"gpt-4o", "gpt-4o-2024-05-13",
+	"gpt-4o-2024-08-06",
+	"chatgpt-4o-latest",
 	"gpt-4o-mini", "gpt-4o-mini-2024-07-18",
 	"gpt-4-vision-preview",
 	"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
@@ -55,8 +55,8 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
 			render.StringData(c, data) // if error happened, pass the data to client
 			continue // just ignore the error
 		}
-		if len(streamResponse.Choices) == 0 {
-			// but for empty choice, we should not pass it to client, this is for azure
+		if len(streamResponse.Choices) == 0 && streamResponse.Usage == nil {
+			// but for empty choice and no usage, we should not pass it to client, this is for azure
 			continue // just ignore empty choice
 		}
 		render.StringData(c, data)
@@ -19,11 +19,11 @@ type Prompt struct {
 }
 
 type ChatRequest struct {
-	Prompt         Prompt  `json:"prompt"`
-	Temperature    float64 `json:"temperature,omitempty"`
-	CandidateCount int     `json:"candidateCount,omitempty"`
-	TopP           float64 `json:"topP,omitempty"`
-	TopK           int     `json:"topK,omitempty"`
+	Prompt         Prompt   `json:"prompt"`
+	Temperature    *float64 `json:"temperature,omitempty"`
+	CandidateCount int      `json:"candidateCount,omitempty"`
+	TopP           *float64 `json:"topP,omitempty"`
+	TopK           int      `json:"topK,omitempty"`
 }
 
 type Error struct {
@@ -1,7 +1,13 @@
 package stepfun
 
 var ModelList = []string{
+	"step-1-8k",
 	"step-1-32k",
+	"step-1-128k",
+	"step-1-256k",
+	"step-1-flash",
+	"step-2-16k",
+	"step-1v-8k",
 	"step-1v-32k",
-	"step-1-200k",
+	"step-1x-medium",
 }
@@ -5,4 +5,5 @@ var ModelList = []string{
 	"hunyuan-standard",
 	"hunyuan-standard-256K",
 	"hunyuan-pro",
+	"hunyuan-vision",
 }
@@ -39,8 +39,8 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 		Model:       &request.Model,
 		Stream:      &request.Stream,
 		Messages:    messages,
-		TopP:        &request.TopP,
-		Temperature: &request.Temperature,
+		TopP:        request.TopP,
+		Temperature: request.Temperature,
 	}
 }
@@ -13,7 +13,12 @@ import (
 )
 
 var ModelList = []string{
-	"claude-3-haiku@20240307", "claude-3-opus@20240229", "claude-3-5-sonnet@20240620", "claude-3-sonnet@20240229",
+	"claude-3-haiku@20240307",
+	"claude-3-sonnet@20240229",
+	"claude-3-opus@20240229",
+	"claude-3-5-sonnet@20240620",
+	"claude-3-5-sonnet-v2@20241022",
+	"claude-3-5-haiku@20241022",
 }
 
 const anthropicVersion = "vertex-2023-10-16"
@@ -11,8 +11,8 @@ type Request struct {
 	MaxTokens     int              `json:"max_tokens,omitempty"`
 	StopSequences []string         `json:"stop_sequences,omitempty"`
 	Stream        bool             `json:"stream,omitempty"`
-	Temperature   float64          `json:"temperature,omitempty"`
-	TopP          float64          `json:"top_p,omitempty"`
+	Temperature   *float64         `json:"temperature,omitempty"`
+	TopP          *float64         `json:"top_p,omitempty"`
 	TopK          int              `json:"top_k,omitempty"`
 	Tools         []anthropic.Tool `json:"tools,omitempty"`
 	ToolChoice    any              `json:"tool_choice,omitempty"`
@@ -15,7 +15,7 @@ import (
 )
 
 var ModelList = []string{
-	"gemini-1.5-pro-001", "gemini-1.5-flash-001", "gemini-pro", "gemini-pro-vision",
+	"gemini-1.5-pro-001", "gemini-1.5-flash-001", "gemini-pro", "gemini-pro-vision", "gemini-1.5-pro-002", "gemini-1.5-flash-002",
 }
 
 type Adaptor struct {
5 relay/adaptor/xai/constants.go (new file)

@@ -0,0 +1,5 @@
+package xai
+
+var ModelList = []string{
+	"grok-beta",
+}
@@ -5,6 +5,8 @@ var ModelList = []string{
 	"SparkDesk-v1.1",
 	"SparkDesk-v2.1",
 	"SparkDesk-v3.1",
+	"SparkDesk-v3.1-128K",
 	"SparkDesk-v3.5",
+	"SparkDesk-v3.5-32K",
 	"SparkDesk-v4.0",
 }
@@ -272,9 +272,9 @@ func xunfeiMakeRequest(textRequest model.GeneralOpenAIRequest, domain, authUrl,
 }
 
 func parseAPIVersionByModelName(modelName string) string {
-	parts := strings.Split(modelName, "-")
-	if len(parts) == 2 {
-		return parts[1]
+	index := strings.IndexAny(modelName, "-")
+	if index != -1 {
+		return modelName[index+1:]
 	}
 	return ""
 }

@@ -283,13 +283,17 @@ func apiVersion2domain(apiVersion string) string {
 	switch apiVersion {
 	case "v1.1":
-		return "general"
+		return "lite"
 	case "v2.1":
 		return "generalv2"
 	case "v3.1":
 		return "generalv3"
+	case "v3.1-128K":
+		return "pro-128k"
 	case "v3.5":
 		return "generalv3.5"
+	case "v3.5-32K":
+		return "max-32k"
 	case "v4.0":
 		return "4.0Ultra"
 	}

@@ -297,7 +301,17 @@ func apiVersion2domain(apiVersion string) string {
 }
 
 func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (string, string) {
+	var authUrl string
 	domain := apiVersion2domain(apiVersion)
-	authUrl := buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
+	switch apiVersion {
+	case "v3.1-128K":
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret)
+		break
+	case "v3.5-32K":
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret)
+		break
+	default:
+		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
+	}
 	return domain, authUrl
 }
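The parser change matters for the new two-dash model names: the old `strings.Split` version only accepted exactly two parts, so "SparkDesk-v3.1-128K" yielded an empty version. Cutting at the first dash keeps the whole suffix. A runnable sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// parseAPIVersion keeps everything after the first dash, so version strings
// that themselves contain dashes (e.g. "v3.1-128K") survive intact.
func parseAPIVersion(modelName string) string {
	index := strings.IndexAny(modelName, "-")
	if index != -1 {
		return modelName[index+1:]
	}
	return ""
}

func main() {
	fmt.Println(parseAPIVersion("SparkDesk-v3.5"))      // v3.5
	fmt.Println(parseAPIVersion("SparkDesk-v3.1-128K")) // v3.1-128K
	fmt.Println(parseAPIVersion("SparkDesk"))           // "" (no version)
}
```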
@@ -19,11 +19,11 @@ type ChatRequest struct {
 	} `json:"header"`
 	Parameter struct {
 		Chat struct {
-			Domain      string  `json:"domain,omitempty"`
-			Temperature float64 `json:"temperature,omitempty"`
-			TopK        int     `json:"top_k,omitempty"`
-			MaxTokens   int     `json:"max_tokens,omitempty"`
-			Auditing    bool    `json:"auditing,omitempty"`
+			Domain      string   `json:"domain,omitempty"`
+			Temperature *float64 `json:"temperature,omitempty"`
+			TopK        int      `json:"top_k,omitempty"`
+			MaxTokens   int      `json:"max_tokens,omitempty"`
+			Auditing    bool     `json:"auditing,omitempty"`
 		} `json:"chat"`
 	} `json:"parameter"`
 	Payload struct {
@@ -4,13 +4,13 @@ import (
 	"errors"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/relaymode"
 	"io"
-	"math"
 	"net/http"
 	"strings"
 )

@@ -65,13 +65,13 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 		baiduEmbeddingRequest, err := ConvertEmbeddingRequest(*request)
 		return baiduEmbeddingRequest, err
 	default:
-		// TopP (0.0, 1.0)
-		request.TopP = math.Min(0.99, request.TopP)
-		request.TopP = math.Max(0.01, request.TopP)
+		// TopP [0.0, 1.0]
+		request.TopP = helper.Float64PtrMax(request.TopP, 1)
+		request.TopP = helper.Float64PtrMin(request.TopP, 0)
 
-		// Temperature (0.0, 1.0)
-		request.Temperature = math.Min(0.99, request.Temperature)
-		request.Temperature = math.Max(0.01, request.Temperature)
+		// Temperature [0.0, 1.0]
+		request.Temperature = helper.Float64PtrMax(request.Temperature, 1)
+		request.Temperature = helper.Float64PtrMin(request.Temperature, 0)
 		a.SetVersionByModeName(request.Model)
 		if a.APIVersion == "v4" {
 			return request, nil
@@ -12,8 +12,8 @@ type Message struct {
 
 type Request struct {
 	Prompt      []Message `json:"prompt"`
-	Temperature float64   `json:"temperature,omitempty"`
-	TopP        float64   `json:"top_p,omitempty"`
+	Temperature *float64  `json:"temperature,omitempty"`
+	TopP        *float64  `json:"top_p,omitempty"`
 	RequestId   string    `json:"request_id,omitempty"`
 	Incremental bool      `json:"incremental,omitempty"`
 }
@@ -30,6 +30,14 @@ var ImageSizeRatios = map[string]map[string]float64{
 		"720x1280": 1,
 		"1280x720": 1,
 	},
+	"step-1x-medium": {
+		"256x256":   1,
+		"512x512":   1,
+		"768x768":   1,
+		"1024x1024": 1,
+		"1280x800":  1,
+		"800x1280":  1,
+	},
 }
 
 var ImageGenerationAmounts = map[string][2]int{

@@ -39,6 +47,7 @@ var ImageGenerationAmounts = map[string][2]int{
 	"ali-stable-diffusion-v1.5": {1, 4}, // Ali
 	"wanx-v1":                   {1, 4}, // Ali
 	"cogview-3":                 {1, 1},
+	"step-1x-medium":            {1, 1},
 }
 
 var ImagePromptLengthLimitations = map[string]int{

@@ -48,6 +57,7 @@ var ImagePromptLengthLimitations = map[string]int{
 	"ali-stable-diffusion-v1.5": 4000,
 	"wanx-v1":                   4000,
 	"cogview-3":                 833,
+	"step-1x-medium":            4000,
 }
 
 var ImageOriginModelName = map[string]string{
@@ -34,7 +34,9 @@ var ModelRatio = map[string]float64{
 	"gpt-4-turbo":            5,     // $0.01 / 1K tokens
 	"gpt-4-turbo-2024-04-09": 5,     // $0.01 / 1K tokens
 	"gpt-4o":                 2.5,   // $0.005 / 1K tokens
+	"chatgpt-4o-latest":      2.5,   // $0.005 / 1K tokens
 	"gpt-4o-2024-05-13":      2.5,   // $0.005 / 1K tokens
+	"gpt-4o-2024-08-06":      1.25,  // $0.0025 / 1K tokens
 	"gpt-4o-mini":            0.075, // $0.00015 / 1K tokens
 	"gpt-4o-mini-2024-07-18": 0.075, // $0.00015 / 1K tokens
 	"gpt-4-vision-preview":   5,     // $0.01 / 1K tokens

@@ -77,8 +79,10 @@ var ModelRatio = map[string]float64{
 	"claude-2.0":                 8.0 / 1000 * USD,
 	"claude-2.1":                 8.0 / 1000 * USD,
 	"claude-3-haiku-20240307":    0.25 / 1000 * USD,
+	"claude-3-5-haiku-20241022":  1.0 / 1000 * USD,
 	"claude-3-sonnet-20240229":   3.0 / 1000 * USD,
 	"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD,
+	"claude-3-5-sonnet-20241022": 3.0 / 1000 * USD,
 	"claude-3-opus-20240229":     15.0 / 1000 * USD,
 	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
 	"ERNIE-4.0-8K": 0.120 * RMB,

@@ -126,7 +130,9 @@ var ModelRatio = map[string]float64{
 	"SparkDesk-v1.1":        1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v2.1":        1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.1":        1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.1-128K":   1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.5":        1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.5-32K":    1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v4.0":        1.2858, // ¥0.018 / 1k tokens
 	"360GPT_S2_V9":          0.8572, // ¥0.012 / 1k tokens
 	"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens

@@ -158,23 +164,34 @@ var ModelRatio = map[string]float64{
 	"mistral-embed": 0.1 / 1000 * USD,
 	// https://wow.groq.com/#:~:text=inquiries%C2%A0here.-,Model,-Current%20Speed
 	"gemma-7b-it": 0.07 / 1000000 * USD,
-	"mixtral-8x7b-32768": 0.24 / 1000000 * USD,
-	"llama3-8b-8192":     0.05 / 1000000 * USD,
-	"llama3-70b-8192":    0.59 / 1000000 * USD,
 	"gemma2-9b-it":             0.20 / 1000000 * USD,
 	"llama-3.1-405b-reasoning": 0.89 / 1000000 * USD,
 	"llama-3.1-70b-versatile":  0.59 / 1000000 * USD,
 	"llama-3.1-8b-instant":     0.05 / 1000000 * USD,
+	"llama-3.2-11b-text-preview":   0.05 / 1000000 * USD,
+	"llama-3.2-11b-vision-preview": 0.05 / 1000000 * USD,
+	"llama-3.2-1b-preview":         0.05 / 1000000 * USD,
+	"llama-3.2-3b-preview":         0.05 / 1000000 * USD,
+	"llama-3.2-90b-text-preview":   0.59 / 1000000 * USD,
+	"llama-guard-3-8b":             0.05 / 1000000 * USD,
+	"llama3-70b-8192":              0.59 / 1000000 * USD,
+	"llama3-8b-8192":               0.05 / 1000000 * USD,
 	"llama3-groq-70b-8192-tool-use-preview": 0.89 / 1000000 * USD,
 	"llama3-groq-8b-8192-tool-use-preview":  0.19 / 1000000 * USD,
+	"mixtral-8x7b-32768": 0.24 / 1000000 * USD,
 
 	// https://platform.lingyiwanwu.com/docs#-计费单元
 	"yi-34b-chat-0205": 2.5 / 1000 * RMB,
 	"yi-34b-chat-200k": 12.0 / 1000 * RMB,
 	"yi-vl-plus":       6.0 / 1000 * RMB,
-	// stepfun todo
-	"step-1v-32k": 0.024 * RMB,
-	"step-1-32k":  0.024 * RMB,
-	"step-1-200k": 0.15 * RMB,
+	// https://platform.stepfun.com/docs/pricing/details
+	"step-1-8k":    0.005 / 1000 * RMB,
+	"step-1-32k":   0.015 / 1000 * RMB,
+	"step-1-128k":  0.040 / 1000 * RMB,
+	"step-1-256k":  0.095 / 1000 * RMB,
+	"step-1-flash": 0.001 / 1000 * RMB,
+	"step-2-16k":   0.038 / 1000 * RMB,
+	"step-1v-8k":   0.005 / 1000 * RMB,
+	"step-1v-32k":  0.015 / 1000 * RMB,
 	// aws llama3 https://aws.amazon.com/cn/bedrock/pricing/
 	"llama3-8b-8192(33)":  0.0003 / 0.002,  // $0.0003 / 1K tokens
 	"llama3-70b-8192(33)": 0.00265 / 0.002, // $0.00265 / 1K tokens

@@ -192,6 +209,8 @@ var ModelRatio = map[string]float64{
 	"deepl-zh": 25.0 / 1000 * USD,
 	"deepl-en": 25.0 / 1000 * USD,
 	"deepl-ja": 25.0 / 1000 * USD,
+	// https://console.x.ai/
+	"grok-beta": 5.0 / 1000 * USD,
 }
 
 var CompletionRatio = map[string]float64{

@@ -200,8 +219,10 @@ var CompletionRatio = map[string]float64{
 	"llama3-70b-8192(33)": 0.0035 / 0.00265,
 }
 
-var DefaultModelRatio map[string]float64
-var DefaultCompletionRatio map[string]float64
+var (
+	DefaultModelRatio      map[string]float64
+	DefaultCompletionRatio map[string]float64
+)
 
 func init() {
 	DefaultModelRatio = make(map[string]float64)

@@ -313,7 +334,7 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		return 4.0 / 3.0
 	}
 	if strings.HasPrefix(name, "gpt-4") {
-		if strings.HasPrefix(name, "gpt-4o-mini") {
+		if strings.HasPrefix(name, "gpt-4o-mini") || name == "gpt-4o-2024-08-06" {
 			return 4
 		}
 		if strings.HasPrefix(name, "gpt-4-turbo") ||

@@ -323,6 +344,9 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		}
 		return 2
 	}
+	if name == "chatgpt-4o-latest" {
+		return 3
+	}
 	if strings.HasPrefix(name, "claude-3") {
 		return 5
 	}

@@ -351,6 +375,8 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		return 3
 	case "command-r-plus":
 		return 5
+	case "grok-beta":
+		return 3
 	}
 	return 1
 }
@@ -46,5 +46,6 @@ const (
 	VertextAI
 	Proxy
 	SiliconFlow
+	XAI
 	Dummy
 )
@@ -45,7 +45,8 @@ var ChannelBaseURLs = []string{
 	"https://api.novita.ai/v3/openai", // 41
 	"",                                // 42
 	"",                                // 43
 	"https://api.siliconflow.cn",      // 44
+	"https://api.x.ai",                // 45
 }
 
 func init() {
@@ -1,5 +1,6 @@
 package role
 
 const (
+	System    = "system"
 	Assistant = "assistant"
 )
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/relay/constant/role"
 	"math"
 	"net/http"
 	"strings"

@@ -154,3 +155,22 @@ func isErrorHappened(meta *meta.Meta, resp *http.Response) bool {
 	}
 	return false
 }
+
+func setSystemPrompt(ctx context.Context, request *relaymodel.GeneralOpenAIRequest, prompt string) {
+	if prompt == "" {
+		return
+	}
+	if len(request.Messages) == 0 {
+		return
+	}
+	if request.Messages[0].Role == role.System {
+		request.Messages[0].Content = prompt
+		logger.Infof(ctx, "rewrite system prompt")
+		return
+	}
+	request.Messages = append([]relaymodel.Message{{
+		Role:    role.System,
+		Content: prompt,
+	}}, request.Messages...)
+	logger.Infof(ctx, "add system prompt")
+}
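The rule the new function implements: a channel-level system prompt overwrites an existing leading system message, otherwise it is prepended. A dependency-free sketch of the same logic (simplified `Message` type, no logging):

```go
package main

import "fmt"

type Message struct {
	Role    string
	Content any
}

// setSystemPrompt rewrites a leading system message in place, or prepends
// one when the conversation starts with a non-system message.
func setSystemPrompt(messages []Message, prompt string) []Message {
	if prompt == "" || len(messages) == 0 {
		return messages
	}
	if messages[0].Role == "system" {
		messages[0].Content = prompt
		return messages
	}
	return append([]Message{{Role: "system", Content: prompt}}, messages...)
}

func main() {
	msgs := []Message{{Role: "user", Content: "hi"}}
	out := setSystemPrompt(msgs, "You are a helpful assistant.")
	fmt.Println(len(out), out[0].Role) // 2 system
}
```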
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
 	"io"
 	"net/http"

@@ -35,6 +36,8 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 	meta.OriginModelName = textRequest.Model
 	textRequest.Model, _ = getMappedModelName(textRequest.Model, meta.ModelMapping)
 	meta.ActualModelName = textRequest.Model
+	// set system prompt if not empty
+	setSystemPrompt(ctx, textRequest, meta.SystemPrompt)
 	// get model ratio & group ratio
 	modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType)
 	groupRatio := billingratio.GetGroupRatio(meta.Group)

@@ -84,7 +87,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 }
 
 func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *model.GeneralOpenAIRequest, adaptor adaptor.Adaptor) (io.Reader, error) {
-	if meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
+	if !config.EnforceIncludeUsage && meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
 		// no need to convert request for openai
 		return c.Request.Body, nil
 	}
@@ -30,6 +30,7 @@ type Meta struct {
 	ActualModelName string
 	RequestURLPath  string
 	PromptTokens    int // only for DoResponse
+	SystemPrompt    string
 }
 
 func GetByContext(c *gin.Context) *Meta {

@@ -46,6 +47,7 @@ func GetByContext(c *gin.Context) *Meta {
 		BaseURL:        c.GetString(ctxkey.BaseURL),
 		APIKey:         strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
 		RequestURLPath: c.Request.URL.String(),
+		SystemPrompt:   c.GetString(ctxkey.SystemPrompt),
 	}
 	cfg, ok := c.Get(ctxkey.Config)
 	if ok {
@@ -1,6 +1,7 @@
 package model
 
 const (
-	ContentTypeText     = "text"
-	ContentTypeImageURL = "image_url"
+	ContentTypeText       = "text"
+	ContentTypeImageURL   = "image_url"
+	ContentTypeInputAudio = "input_audio"
 )
@@ -1,35 +1,70 @@
 package model
 
 type ResponseFormat struct {
-	Type string `json:"type,omitempty"`
+	Type       string      `json:"type,omitempty"`
+	JsonSchema *JSONSchema `json:"json_schema,omitempty"`
+}
+
+type JSONSchema struct {
+	Description string                 `json:"description,omitempty"`
+	Name        string                 `json:"name"`
+	Schema      map[string]interface{} `json:"schema,omitempty"`
+	Strict      *bool                  `json:"strict,omitempty"`
+}
+
+type Audio struct {
+	Voice  string `json:"voice,omitempty"`
+	Format string `json:"format,omitempty"`
+}
+
+type StreamOptions struct {
+	IncludeUsage bool `json:"include_usage,omitempty"`
 }
 
 type GeneralOpenAIRequest struct {
-	Messages         []Message       `json:"messages,omitempty"`
-	Model            string          `json:"model,omitempty"`
-	FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
-	MaxTokens        int             `json:"max_tokens,omitempty"`
-	N                int             `json:"n,omitempty"`
-	PresencePenalty  float64         `json:"presence_penalty,omitempty"`
-	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
-	Seed             float64         `json:"seed,omitempty"`
-	Stop             any             `json:"stop,omitempty"`
-	Stream           bool            `json:"stream,omitempty"`
-	Temperature      float64         `json:"temperature,omitempty"`
-	TopP             float64         `json:"top_p,omitempty"`
-	TopK             int             `json:"top_k,omitempty"`
-	Tools            []Tool          `json:"tools,omitempty"`
-	ToolChoice       any             `json:"tool_choice,omitempty"`
-	FunctionCall     any             `json:"function_call,omitempty"`
-	Functions        any             `json:"functions,omitempty"`
-	User             string          `json:"user,omitempty"`
-	Prompt           any             `json:"prompt,omitempty"`
-	Input            any             `json:"input,omitempty"`
-	EncodingFormat   string          `json:"encoding_format,omitempty"`
-	Dimensions       int             `json:"dimensions,omitempty"`
-	Instruction      string          `json:"instruction,omitempty"`
-	Size             string          `json:"size,omitempty"`
-	NumCtx           int             `json:"num_ctx,omitempty"`
+	// https://platform.openai.com/docs/api-reference/chat/create
+	Messages            []Message       `json:"messages,omitempty"`
+	Model               string          `json:"model,omitempty"`
+	Store               *bool           `json:"store,omitempty"`
+	Metadata            any             `json:"metadata,omitempty"`
+	FrequencyPenalty    *float64        `json:"frequency_penalty,omitempty"`
+	LogitBias           any             `json:"logit_bias,omitempty"`
+	Logprobs            *bool           `json:"logprobs,omitempty"`
+	TopLogprobs         *int            `json:"top_logprobs,omitempty"`
+	MaxTokens           int             `json:"max_tokens,omitempty"`
+	MaxCompletionTokens *int            `json:"max_completion_tokens,omitempty"`
+	N                   int             `json:"n,omitempty"`
+	Modalities          []string        `json:"modalities,omitempty"`
+	Prediction          any             `json:"prediction,omitempty"`
+	Audio               *Audio          `json:"audio,omitempty"`
+	PresencePenalty     *float64        `json:"presence_penalty,omitempty"`
+	ResponseFormat      *ResponseFormat `json:"response_format,omitempty"`
+	Seed                float64         `json:"seed,omitempty"`
+	ServiceTier         *string         `json:"service_tier,omitempty"`
+	Stop                any             `json:"stop,omitempty"`
+	Stream              bool            `json:"stream,omitempty"`
+	StreamOptions       *StreamOptions  `json:"stream_options,omitempty"`
+	Temperature         *float64        `json:"temperature,omitempty"`
+	TopP                *float64        `json:"top_p,omitempty"`
+	TopK                int             `json:"top_k,omitempty"`
+	Tools               []Tool          `json:"tools,omitempty"`
+	ToolChoice          any             `json:"tool_choice,omitempty"`
+	ParallelTooCalls    *bool           `json:"parallel_tool_calls,omitempty"`
+	User                string          `json:"user,omitempty"`
+	FunctionCall        any             `json:"function_call,omitempty"`
+	Functions           any             `json:"functions,omitempty"`
+	// https://platform.openai.com/docs/api-reference/embeddings/create
+	Input          any    `json:"input,omitempty"`
+	EncodingFormat string `json:"encoding_format,omitempty"`
+	Dimensions     int    `json:"dimensions,omitempty"`
+	// https://platform.openai.com/docs/api-reference/images/create
+	Prompt  any     `json:"prompt,omitempty"`
+	Quality *string `json:"quality,omitempty"`
+	Size    string  `json:"size,omitempty"`
+	Style   *string `json:"style,omitempty"`
+	// Others
+	Instruction string `json:"instruction,omitempty"`
+	NumCtx      int    `json:"num_ctx,omitempty"`
 }
 
 func (r GeneralOpenAIRequest) ParseInput() []string {
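The recurring float64-to-*float64 change in this compare set is explained by this struct: with a plain `float64` plus `omitempty`, an explicit `temperature: 0` marshals identically to "field not sent", so the value would silently vanish and the upstream's default apply instead. A pointer keeps the three states apart (unset, zero, nonzero). A runnable demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type plain struct {
	Temperature float64 `json:"temperature,omitempty"`
}

type ptr struct {
	Temperature *float64 `json:"temperature,omitempty"`
}

func main() {
	// Value field: an explicit 0 is dropped by omitempty.
	a, _ := json.Marshal(plain{Temperature: 0})
	fmt.Println(string(a)) // {}

	// Pointer field: nil means "unset", while &0 survives marshaling.
	zero := 0.0
	b, _ := json.Marshal(ptr{Temperature: &zero})
	c, _ := json.Marshal(ptr{})
	fmt.Println(string(b)) // {"temperature":0}
	fmt.Println(string(c)) // {}
}
```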
@@ -11,12 +11,14 @@ import EditToken from '../pages/Token/EditToken';
 const COPY_OPTIONS = [
   { key: 'next', text: 'ChatGPT Next Web', value: 'next' },
   { key: 'ama', text: 'ChatGPT Web & Midjourney', value: 'ama' },
-  { key: 'opencat', text: 'OpenCat', value: 'opencat' }
+  { key: 'opencat', text: 'OpenCat', value: 'opencat' },
+  { key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
 ];
 
 const OPEN_LINK_OPTIONS = [
   { key: 'ama', text: 'ChatGPT Web & Midjourney', value: 'ama' },
-  { key: 'opencat', text: 'OpenCat', value: 'opencat' }
+  { key: 'opencat', text: 'OpenCat', value: 'opencat' },
+  { key: 'lobechat', text: 'LobeChat', value: 'lobechat' }
 ];
 
 function renderTimestamp(timestamp) {

@@ -60,7 +62,12 @@ const TokensTable = () => {
         onOpenLink('next-mj');
       }
     },
-    { node: 'item', key: 'opencat', name: 'OpenCat', value: 'opencat' }
+    { node: 'item', key: 'opencat', name: 'OpenCat', value: 'opencat' },
+    {
+      node: 'item', key: 'lobechat', name: 'LobeChat', onClick: () => {
+        onOpenLink('lobechat');
+      }
+    }
   ];
 
   const columns = [

@@ -177,6 +184,11 @@ const TokensTable = () => {
       node: 'item', key: 'opencat', name: 'OpenCat', onClick: () => {
         onOpenLink('opencat', record.key);
       }
-    }
+    },
+    {
+      node: 'item', key: 'lobechat', name: 'LobeChat', onClick: () => {
+        onOpenLink('lobechat');
+      }
+    }
   ]
 }

@@ -382,6 +394,9 @@ const TokensTable = () => {
     case 'next-mj':
       url = mjLink + `/#/?settings={"key":"sk-${key}","url":"${serverAddress}"}`;
       break;
+    case 'lobechat':
+      url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}/v1"}}}`;
+      break;
    default:
       if (!chatLink) {
         showError('管理员未设置聊天链接');
@@ -30,6 +30,7 @@ export const CHANNEL_OPTIONS = [
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
   { key: 43, text: 'Proxy', value: 43, color: 'blue' },
   { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
+  { key: 45, text: 'xAI', value: 45, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },

@@ -63,7 +63,7 @@ const EditChannel = (props) => {
let localModels = [];
switch (value) {
case 14:
localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-sonnet-20240620"];
localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022"];
break;
case 11:
localModels = ['PaLM-2'];
@@ -78,7 +78,7 @@ const EditChannel = (props) => {
localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
break;
case 18:
localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5', 'SparkDesk-v4.0'];
localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v3.5-32K', 'SparkDesk-v4.0'];
break;
case 19:
localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];

@@ -24,7 +24,7 @@ const config = {
wechat_login: false,
wechat_qrcode: '',
oidc: false,
oidc_app_id: '',
oidc_client_id: '',
oidc_authorization_endpoint: '',
oidc_token_endpoint: '',
oidc_userinfo_endpoint: '',

@@ -179,6 +179,12 @@ export const CHANNEL_OPTIONS = {
value: 44,
color: 'primary'
},
45: {
key: 45,
text: 'xAI',
value: 45,
color: 'primary'
},
41: {
key: 41,
text: 'Novita',

@@ -152,7 +152,7 @@ const LoginForm = ({ ...others }) => {
<Button
disableElevation
fullWidth
onClick={() => onOidcClicked(siteInfo.oidc_authorization_endpoint,siteInfo.oidc_app_id)}
onClick={() => onOidcClicked(siteInfo.oidc_authorization_endpoint,siteInfo.oidc_client_id)}
size="large"
variant="outlined"
sx={{

@@ -268,6 +268,8 @@ function renderBalance(type, balance) {
return <span>¥{balance.toFixed(2)}</span>;
case 13: // AIGC2D
return <span>{renderNumber(balance)}</span>;
case 44: // SiliconFlow
return <span>¥{balance.toFixed(2)}</span>;
default:
return <span>不支持</span>;
}

@@ -91,7 +91,7 @@ const typeConfig = {
other: '版本号'
},
input: {
models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5', 'SparkDesk-v4.0']
models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.1-128K', 'SparkDesk-v3.5', 'SparkDesk-v3.5-32K', 'SparkDesk-v4.0']
},
prompt: {
key: '按照如下格式输入:APPID|APISecret|APIKey',
@@ -223,6 +223,9 @@ const typeConfig = {
},
modelGroup: 'anthropic'
},
45: {
modelGroup: 'xai'
},
};

export { defaultConfig, typeConfig };

@@ -231,7 +231,7 @@ export default function Profile() {
)}
{status.oidc && !inputs.oidc_id && (
<Grid xs={12} md={4}>
<Button variant="contained" onClick={() => onOidcClicked(status.oidc_authorization_endpoint,status.oidc_app_id,true)}>
<Button variant="contained" onClick={() => onOidcClicked(status.oidc_authorization_endpoint,status.oidc_client_id,true)}>
绑定 OIDC 账号
</Button>
</Grid>

@@ -34,8 +34,9 @@ const SystemSetting = () => {
LarkClientId: '',
LarkClientSecret: '',
OidcEnabled: '',
OidcAppId: '',
OidcAppSecret: '',
OidcWellKnown: '',
OidcClientId: '',
OidcClientSecret: '',
OidcAuthorizationEndpoint: '',
OidcTokenEndpoint: '',
OidcUserinfoEndpoint: '',
@@ -150,8 +151,9 @@ const SystemSetting = () => {
name === 'MessagePusherToken' ||
name === 'LarkClientId' ||
name === 'LarkClientSecret' ||
name === 'OidcAppId' ||
name === 'OidcAppSecret' ||
name === 'OidcClientId' ||
name === 'OidcClientSecret' ||
name === 'OidcWellKnown' ||
name === 'OidcAuthorizationEndpoint' ||
name === 'OidcTokenEndpoint' ||
name === 'OidcUserinfoEndpoint'
@@ -239,19 +241,30 @@ const SystemSetting = () => {
};

const submitOidc = async () => {
const OidcConfig = {
OidcAppId: inputs.OidcAppId,
OidcAppSecret: inputs.OidcAppSecret,
OidcAuthorizationEndpoint: inputs.OidcAuthorizationEndpoint,
OidcTokenEndpoint: inputs.OidcTokenEndpoint,
OidcUserinfoEndpoint: inputs.OidcUserinfoEndpoint
};
console.log(OidcConfig);
if (originInputs['OidcAppId'] !== inputs.OidcAppId) {
await updateOption('OidcAppId', inputs.OidcAppId);
if (inputs.OidcWellKnown !== '') {
if (!inputs.OidcWellKnown.startsWith('http://') && !inputs.OidcWellKnown.startsWith('https://')) {
showError('Well-Known URL 必须以 http:// 或 https:// 开头');
return;
}
try {
const res = await API.get(inputs.OidcWellKnown);
inputs.OidcAuthorizationEndpoint = res.data['authorization_endpoint'];
inputs.OidcTokenEndpoint = res.data['token_endpoint'];
inputs.OidcUserinfoEndpoint = res.data['userinfo_endpoint'];
showSuccess('获取 OIDC 配置成功!');
} catch (err) {
showError("获取 OIDC 配置失败,请检查网络状况和 Well-Known URL 是否正确");
}
}
if (originInputs['OidcAppSecret'] !== inputs.OidcAppSecret && inputs.OidcAppSecret !== '') {
await updateOption('OidcAppSecret', inputs.OidcAppSecret);

if (originInputs['OidcWellKnown'] !== inputs.OidcWellKnown) {
await updateOption('OidcWellKnown', inputs.OidcWellKnown);
}
if (originInputs['OidcClientId'] !== inputs.OidcClientId) {
await updateOption('OidcClientId', inputs.OidcClientId);
}
if (originInputs['OidcClientSecret'] !== inputs.OidcClientSecret && inputs.OidcClientSecret !== '') {
await updateOption('OidcClientSecret', inputs.OidcClientSecret);
}
if (originInputs['OidcAuthorizationEndpoint'] !== inputs.OidcAuthorizationEndpoint) {
await updateOption('OidcAuthorizationEndpoint', inputs.OidcAuthorizationEndpoint);
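
The submitOidc logic above fetches the provider's discovery document from the Well-Known URL and copies three endpoints out of it. A minimal backend-style sketch of the same idea, assuming a standard OpenID Connect Discovery document; the type and function names are hypothetical, not this repository's code:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// oidcDiscovery holds the fields read from /.well-known/openid-configuration.
type oidcDiscovery struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
	UserinfoEndpoint      string `json:"userinfo_endpoint"`
}

// fetchOidcConfig downloads and decodes the discovery document.
func fetchOidcConfig(wellKnownURL string) (*oidcDiscovery, error) {
	resp, err := http.Get(wellKnownURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var cfg oidcDiscovery
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

func main() {
	// Hypothetical provider URL, for illustration only.
	cfg, err := fetchOidcConfig("https://provider.example.com/.well-known/openid-configuration")
	if err != nil {
		fmt.Println("discovery failed:", err)
		return
	}
	fmt.Println(cfg.AuthorizationEndpoint, cfg.TokenEndpoint, cfg.UserinfoEndpoint)
}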

@@ -332,7 +345,7 @@ const SystemSetting = () => {
</Grid>
<Grid xs={12} md={3}>
<FormControlLabel
label="允许通过 Oidc 登录 & 注册"
label="允许通过 OIDC 登录 & 注册"
control={<Checkbox checked={inputs.OidcEnabled === 'true'} onChange={handleInputChange} name="OidcEnabled" />}
/>
</Grid>
@@ -675,31 +688,34 @@ const SystemSetting = () => {
<Alert severity="info" sx={ { wordWrap: 'break-word' } }>
主页链接填 <code>{ inputs.ServerAddress }</code>
,重定向 URL 填 <code>{ `${ inputs.ServerAddress }/oauth/oidc` }</code>
</Alert> <br />
<Alert severity="info" sx={ { wordWrap: 'break-word' } }>
若你的 OIDC Provider 支持 Discovery Endpoint,你可以仅填写 OIDC Well-Known URL,系统会自动获取 OIDC 配置
</Alert>
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcAppId">App ID</InputLabel>
<InputLabel htmlFor="OidcClientId">Client ID</InputLabel>
<OutlinedInput
id="OidcAppId"
name="OidcAppId"
value={ inputs.OidcAppId || '' }
id="OidcClientId"
name="OidcClientId"
value={ inputs.OidcClientId || '' }
onChange={ handleInputChange }
label="App ID"
placeholder="输入 OAuth 2.0 的 App ID"
label="Client ID"
placeholder="输入 OIDC 的 Client ID"
disabled={ loading }
/>
</FormControl>
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcAppSecret">App Secret</InputLabel>
<InputLabel htmlFor="OidcClientSecret">Client Secret</InputLabel>
<OutlinedInput
id="OidcAppSecret"
name="OidcAppSecret"
value={ inputs.OidcAppSecret || '' }
id="OidcClientSecret"
name="OidcClientSecret"
value={ inputs.OidcClientSecret || '' }
onChange={ handleInputChange }
label="App Secret"
label="Client Secret"
placeholder="敏感信息不会发送到前端显示"
disabled={ loading }
/>
@@ -707,49 +723,63 @@ const SystemSetting = () => {
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcAuthorizationEndpoint">授权地址</InputLabel>
<InputLabel htmlFor="OidcWellKnown">Well-Known URL</InputLabel>
<OutlinedInput
id="OidcWellKnown"
name="OidcWellKnown"
value={ inputs.OidcWellKnown || '' }
onChange={ handleInputChange }
label="Well-Known URL"
placeholder="请输入 OIDC 的 Well-Known URL"
disabled={ loading }
/>
</FormControl>
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcAuthorizationEndpoint">Authorization Endpoint</InputLabel>
<OutlinedInput
id="OidcAuthorizationEndpoint"
name="OidcAuthorizationEndpoint"
value={ inputs.OidcAuthorizationEndpoint || '' }
onChange={ handleInputChange }
label="授权地址"
placeholder="输入 OAuth 2.0 的 授权地址"
label="Authorization Endpoint"
placeholder="输入 OIDC 的 Authorization Endpoint"
disabled={ loading }
/>
</FormControl>
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcTokenEndpoint">认证地址</InputLabel>
<InputLabel htmlFor="OidcTokenEndpoint">Token Endpoint</InputLabel>
<OutlinedInput
id="OidcTokenEndpoint"
name="OidcTokenEndpoint"
value={ inputs.OidcTokenEndpoint || '' }
onChange={ handleInputChange }
label="认证地址"
placeholder="输入 OAuth 2.0 的 认证地址"
label="Token Endpoint"
placeholder="输入 OIDC 的 Token Endpoint"
disabled={ loading }
/>
</FormControl>
</Grid>
<Grid xs={ 12 } md={ 6 }>
<FormControl fullWidth>
<InputLabel htmlFor="OidcUserinfoEndpoint">用户地址</InputLabel>
<InputLabel htmlFor="OidcUserinfoEndpoint">Userinfo Endpoint</InputLabel>
<OutlinedInput
id="OidcUserinfoEndpoint"
name="OidcUserinfoEndpoint"
value={ inputs.OidcUserinfoEndpoint || '' }
onChange={ handleInputChange }
label="认证地址"
placeholder="输入 OAuth 2.0 的 认证地址"
label="Userinfo Endpoint"
placeholder="输入 OIDC 的 Userinfo Endpoint"
disabled={ loading }
/>
</FormControl>
</Grid>
<Grid xs={ 12 }>
<Button variant="contained" onClick={ submitOidc }>
保存第三方 OAuth 2.0 设置
保存 OIDC 设置
</Button>
</Grid>
</Grid>

@@ -32,7 +32,8 @@ const COPY_OPTIONS = [
encode: false
},
{ key: 'ama', text: 'BotGem', url: 'ama://set-api-key?server={serverAddress}&key=sk-{key}', encode: true },
{ key: 'opencat', text: 'OpenCat', url: 'opencat://team/join?domain={serverAddress}&token=sk-{key}', encode: true }
{ key: 'opencat', text: 'OpenCat', url: 'opencat://team/join?domain={serverAddress}&token=sk-{key}', encode: true },
{ key: 'lobechat', text: 'LobeChat', url: 'https://lobehub.com/?settings={"keyVaults":{"openai":{"apiKey":"sk-{key}","baseURL":"{serverAddress}"}}}', encode: true }
];

function replacePlaceholders(text, key, serverAddress) {

@@ -52,11 +52,19 @@ function renderBalance(type, balance) {
return <span>¥{balance.toFixed(2)}</span>;
case 13: // AIGC2D
return <span>{renderNumber(balance)}</span>;
case 44: // SiliconFlow
return <span>¥{balance.toFixed(2)}</span>;
default:
return <span>不支持</span>;
}
}

function isShowDetail() {
return localStorage.getItem("show_detail") === "true";
}

const promptID = "detail"

const ChannelsTable = () => {
const [channels, setChannels] = useState([]);
const [loading, setLoading] = useState(true);
@@ -64,7 +72,8 @@ const ChannelsTable = () => {
const [searchKeyword, setSearchKeyword] = useState('');
const [searching, setSearching] = useState(false);
const [updatingBalance, setUpdatingBalance] = useState(false);
const [showPrompt, setShowPrompt] = useState(shouldShowPrompt("channel-test"));
const [showPrompt, setShowPrompt] = useState(shouldShowPrompt(promptID));
const [showDetail, setShowDetail] = useState(isShowDetail());

const loadChannels = async (startIdx) => {
const res = await API.get(`/api/channel/?p=${startIdx}`);
@@ -118,6 +127,11 @@ const ChannelsTable = () => {
await loadChannels(activePage - 1);
};

const toggleShowDetail = () => {
setShowDetail(!showDetail);
localStorage.setItem("show_detail", (!showDetail).toString());
}

useEffect(() => {
loadChannels(0)
.then()
@@ -362,11 +376,13 @@ const ChannelsTable = () => {
showPrompt && (
<Message onDismiss={() => {
setShowPrompt(false);
setPromptShown("channel-test");
setPromptShown(promptID);
}}>
OpenAI 渠道已经不再支持通过 key 获取余额,因此余额显示为 0。对于支持的渠道类型,请点击余额进行刷新。
<br/>
渠道测试仅支持 chat 模型,优先使用 gpt-3.5-turbo,如果该模型不可用则使用你所配置的模型列表中的第一个模型。
<br/>
点击下方详情按钮可以显示余额以及设置额外的测试模型。
</Message>
)
}
@@ -426,6 +442,7 @@ const ChannelsTable = () => {
onClick={() => {
sortChannel('balance');
}}
hidden={!showDetail}
>
余额
</Table.HeaderCell>
@@ -437,7 +454,7 @@ const ChannelsTable = () => {
>
优先级
</Table.HeaderCell>
<Table.HeaderCell>测试模型</Table.HeaderCell>
<Table.HeaderCell hidden={!showDetail}>测试模型</Table.HeaderCell>
<Table.HeaderCell>操作</Table.HeaderCell>
</Table.Row>
</Table.Header>
@@ -465,7 +482,7 @@ const ChannelsTable = () => {
basic
/>
</Table.Cell>
<Table.Cell>
<Table.Cell hidden={!showDetail}>
<Popup
trigger={<span onClick={() => {
updateChannelBalance(channel.id, channel.name, idx);
@@ -492,7 +509,7 @@ const ChannelsTable = () => {
basic
/>
</Table.Cell>
<Table.Cell>
<Table.Cell hidden={!showDetail}>
<Dropdown
placeholder='请选择测试模型'
selection
@@ -571,7 +588,7 @@ const ChannelsTable = () => {

<Table.Footer>
<Table.Row>
<Table.HeaderCell colSpan='9'>
<Table.HeaderCell colSpan={showDetail ? "10" : "8"}>
<Button size='small' as={Link} to='/channel/add' loading={loading}>
添加新的渠道
</Button>
@@ -609,6 +626,7 @@ const ChannelsTable = () => {
}
/>
<Button size='small' onClick={refresh} loading={loading}>刷新</Button>
<Button size='small' onClick={toggleShowDetail}>{showDetail ? "隐藏详情" : "详情"}</Button>
</Table.HeaderCell>
</Table.Row>
</Table.Footer>

@@ -10,12 +10,14 @@ const COPY_OPTIONS = [
{ key: 'next', text: 'ChatGPT Next Web', value: 'next' },
{ key: 'ama', text: 'BotGem', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
];

const OPEN_LINK_OPTIONS = [
{ key: 'next', text: 'ChatGPT Next Web', value: 'next' },
{ key: 'ama', text: 'BotGem', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
];

function renderTimestamp(timestamp) {
@@ -114,6 +116,9 @@ const TokensTable = () => {
case 'next':
url = nextUrl;
break;
case 'lobechat':
url = nextLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}/v1"}}}`;
break;
default:
url = `sk-${key}`;
}
@@ -153,7 +158,11 @@ const TokensTable = () => {
case 'opencat':
url = `opencat://team/join?domain=${encodedServerAddress}&token=sk-${key}`;
break;

case 'lobechat':
url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}/v1"}}}`;
break;

default:
url = defaultUrl;
}

@@ -30,6 +30,7 @@ export const CHANNEL_OPTIONS = [
{ key: 42, text: 'VertexAI', value: 42, color: 'blue' },
{ key: 43, text: 'Proxy', value: 43, color: 'blue' },
{ key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
{ key: 45, text: 'xAI', value: 45, color: 'blue' },
{ key: 8, text: '自定义渠道', value: 8, color: 'pink' },
{ key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
{ key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },

@@ -43,6 +43,7 @@ const EditChannel = () => {
base_url: '',
other: '',
model_mapping: '',
system_prompt: '',
models: [],
groups: ['default']
};
@@ -425,7 +426,7 @@ const EditChannel = () => {
)
}
{
inputs.type !== 43 && (
inputs.type !== 43 && (<>
<Form.Field>
<Form.TextArea
label='模型重定向'
@@ -437,6 +438,18 @@ const EditChannel = () => {
autoComplete='new-password'
/>
</Form.Field>
<Form.Field>
<Form.TextArea
label='系统提示词'
placeholder={`此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型`}
name='system_prompt'
onChange={handleInputChange}
value={inputs.system_prompt}
style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
autoComplete='new-password'
/>
</Form.Field>
</>
)
}
{
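
The system prompt (系统提示词) field above forces a channel-level system prompt; per the placeholder text, it is meant to be paired with a custom model name that model redirection maps onto a model the channel natively supports. A hedged sketch of what applying such a prompt amounts to; the Message type and the function are hypothetical illustrations, not this repository's code:

package main

import "fmt"

// Message is a minimal stand-in for a chat message.
type Message struct {
	Role    string
	Content string
}

// applySystemPrompt prepends a forced system message when the channel has
// one configured, and leaves the request untouched otherwise.
func applySystemPrompt(messages []Message, systemPrompt string) []Message {
	if systemPrompt == "" {
		return messages
	}
	return append([]Message{{Role: "system", Content: systemPrompt}}, messages...)
}

func main() {
	msgs := applySystemPrompt(
		[]Message{{Role: "user", Content: "hello"}},
		"You are a helpful assistant.")
	fmt.Println(msgs)
}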

@@ -2,7 +2,7 @@ import React from 'react';
import { Header, Segment } from 'semantic-ui-react';
import ChannelsTable from '../../components/ChannelsTable';

const File = () => (
const Channel = () => (
<>
<Segment>
<Header as='h3'>管理渠道</Header>
@@ -11,4 +11,4 @@ const File = () => (
</>
);

export default File;
export default Channel;