Mirror of https://github.com/songquanpeng/one-api.git
Synced 2025-10-23 18:03:41 +08:00

Compare commits: v0.5.6-alp ... v0.5.6-alp (16 commits)
Commits (SHA1):

- 53b2cace0b
- f0fc991b44
- 594f06e7b0
- 197d1d7a9d
- f9b748c2ca
- fd98463611
- f5a1cd3463
- 8651451e53
- 1c5bb97a42
- de868e4e4e
- 1d258cc898
- 37e09d764c
- 159b9e3369
- 92001986db
- a5647b1ea7
- 215e54fc96
README.md (29 changes)
````diff
@@ -59,6 +59,9 @@ _✨ Access all large models through the standard OpenAI API format, out of the
 > **Warning**
 > The latest image pulled via Docker may be an `alpha` build; pin a version manually if you want stability.
 
+> **Warning**
+> After logging in as the root user for the first time, be sure to change the default password `123456`!
+
 ## Features
 1. Supports multiple large models:
    + [x] [OpenAI ChatGPT series models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (supports the [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
@@ -269,6 +272,12 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
 
 Note that the exact API Base format depends on the client you are using.
 
+For example, for the official OpenAI library:
+```bash
+OPENAI_API_KEY="sk-xxxxxx"
+OPENAI_API_BASE="https://<HOST>:<PORT>/v1"
+```
+
 ```mermaid
 graph LR
     A(用户)
@@ -303,22 +312,24 @@ graph LR
     + `SQL_CONN_MAX_LIFETIME`: maximum lifetime of a connection, in minutes; defaults to `60`.
 4. `FRONTEND_BASE_URL`: when set, page requests are redirected to the specified address; can only be set on the server.
     + Example: `FRONTEND_BASE_URL=https://openai.justsong.cn`
-5. `SYNC_FREQUENCY`: when set, the configuration is periodically synced with the database, in seconds; no sync is performed when unset.
+5. `MEMORY_CACHE_ENABLED`: enables the in-memory cache, which introduces some delay in user quota updates; allowed values are `true` and `false`; defaults to `false` when unset.
+    + Example: `MEMORY_CACHE_ENABLED=true`
+6. `SYNC_FREQUENCY`: how often the configuration is synced with the database when caching is enabled, in seconds; defaults to `600`.
+    + Example: `SYNC_FREQUENCY=60`
-6. `NODE_TYPE`: when set, specifies the node type; allowed values are `master` and `slave`; defaults to `master` when unset.
+7. `NODE_TYPE`: when set, specifies the node type; allowed values are `master` and `slave`; defaults to `master` when unset.
     + Example: `NODE_TYPE=slave`
-7. `CHANNEL_UPDATE_FREQUENCY`: when set, channel balances are updated periodically, in minutes; no updates are performed when unset.
+8. `CHANNEL_UPDATE_FREQUENCY`: when set, channel balances are updated periodically, in minutes; no updates are performed when unset.
     + Example: `CHANNEL_UPDATE_FREQUENCY=1440`
-8. `CHANNEL_TEST_FREQUENCY`: when set, channels are checked periodically, in minutes; no checks are performed when unset.
+9. `CHANNEL_TEST_FREQUENCY`: when set, channels are checked periodically, in minutes; no checks are performed when unset.
     + Example: `CHANNEL_TEST_FREQUENCY=1440`
-9. `POLLING_INTERVAL`: interval between requests when batch-updating channel balances and testing availability, in seconds; no interval by default.
-    + Example: `POLLING_INTERVAL=5`
-10. `BATCH_UPDATE_ENABLED`: enables aggregated batch database updates, which introduces some delay in user quota updates; allowed values are `true` and `false`; defaults to `false` when unset.
+10. `POLLING_INTERVAL`: interval between requests when batch-updating channel balances and testing availability, in seconds; no interval by default.
+    + Example: `POLLING_INTERVAL=5`
+11. `BATCH_UPDATE_ENABLED`: enables aggregated batch database updates, which introduces some delay in user quota updates; allowed values are `true` and `false`; defaults to `false` when unset.
+    + Example: `BATCH_UPDATE_ENABLED=true`
+    + If you run into the problem of too many database connections, try enabling this option.
-11. `BATCH_UPDATE_INTERVAL=5`: interval for aggregated batch updates, in seconds; defaults to `5`.
+12. `BATCH_UPDATE_INTERVAL=5`: interval for aggregated batch updates, in seconds; defaults to `5`.
+    + Example: `BATCH_UPDATE_INTERVAL=5`
-12. Request rate limits:
+13. Request rate limits:
     + `GLOBAL_API_RATE_LIMIT`: global API rate limit (excluding relay requests), maximum number of requests per IP within three minutes; defaults to `180`.
     + `GLOBAL_WEB_RATE_LIMIT`: global web rate limit, maximum number of requests per IP within three minutes; defaults to `60`.
````
```diff
@@ -56,6 +56,7 @@ var EmailDomainWhitelist = []string{
 }
 
 var DebugEnabled = os.Getenv("DEBUG") == "true"
+var MemoryCacheEnabled = os.Getenv("MEMORY_CACHE_ENABLED") == "true"
 
 var LogConsumeEnabled = true
 
@@ -92,7 +93,7 @@ var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
 var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
 var RequestInterval = time.Duration(requestInterval) * time.Second
 
-var SyncFrequency = 10 * 60 // unit is second, will be overwritten by SYNC_FREQUENCY
+var SyncFrequency = GetOrDefault("SYNC_FREQUENCY", 10*60) // unit is second
 
 var BatchUpdateEnabled = false
 var BatchUpdateInterval = GetOrDefault("BATCH_UPDATE_INTERVAL", 5)
```
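The new `SyncFrequency` default goes through the same `GetOrDefault` helper already used for `BatchUpdateInterval`. For readers unfamiliar with it, a minimal sketch of such a helper, assuming it falls back to the default on a missing or unparsable value (the actual helper in one-api may log the error or differ in details):

```go
package common

import (
	"os"
	"strconv"
)

// GetOrDefault-style helper: read an integer environment variable,
// falling back to defaultValue when it is unset or invalid.
func GetOrDefault(env string, defaultValue int) int {
	value := os.Getenv(env)
	if value == "" {
		return defaultValue
	}
	num, err := strconv.Atoi(value)
	if err != nil {
		return defaultValue
	}
	return num
}
```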
```diff
@@ -24,6 +24,7 @@ var ModelRatio = map[string]float64{
 	"gpt-3.5-turbo-0613":     0.75,
 	"gpt-3.5-turbo-16k":      1.5, // $0.003 / 1K tokens
 	"gpt-3.5-turbo-16k-0613": 1.5,
+	"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
 	"text-ada-001":           0.2,
 	"text-babbage-001":       0.25,
 	"text-curie-001":         1,
@@ -50,8 +51,8 @@ var ModelRatio = map[string]float64{
 	"chatglm_pro":       0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":       0.3572, // ¥0.005 / 1k tokens
 	"chatglm_lite":      0.1429, // ¥0.002 / 1k tokens
-	"qwen-v1":           0.8572, // ¥0.012 / 1k tokens
-	"qwen-plus-v1":      1,      // ¥0.014 / 1k tokens
+	"qwen-turbo":        0.8572, // ¥0.012 / 1k tokens
+	"qwen-plus":         10,     // ¥0.14 / 1k tokens
 	"text-embedding-v1": 0.05,   // ¥0.0007 / 1k tokens
 	"SparkDesk":         1.2858, // ¥0.018 / 1k tokens
 	"360GPT_S2_V9":      0.8572, // ¥0.012 / 1k tokens
```
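The price comments suggest the convention that a ratio of `1` corresponds to $0.002 per 1K tokens (e.g. `1.5` maps to $0.003 / 1K). A small sketch of converting a ratio and token count into a dollar cost under that assumption:

```go
package main

import "fmt"

// Assumed convention from the comments above: ratio 1 == $0.002 / 1K tokens,
// so gpt-3.5-turbo-16k at ratio 1.5 costs $0.003 per 1K tokens.
func costUSD(ratio float64, tokens int) float64 {
	return ratio * 0.002 * float64(tokens) / 1000
}

func main() {
	fmt.Printf("$%.4f\n", costUSD(1.5, 2000)) // 2K tokens at ratio 1.5 -> $0.0060
}
```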
```diff
@@ -111,7 +111,7 @@ func GetResponseBody(method, url string, channel *model.Channel, headers http.He
 }
 
 func updateChannelCloseAIBalance(channel *model.Channel) (float64, error) {
-	url := fmt.Sprintf("%s/dashboard/billing/credit_grants", channel.BaseURL)
+	url := fmt.Sprintf("%s/dashboard/billing/credit_grants", channel.GetBaseURL())
 	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
 
 	if err != nil {
@@ -201,18 +201,18 @@ func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
 
 func updateChannelBalance(channel *model.Channel) (float64, error) {
 	baseURL := common.ChannelBaseURLs[channel.Type]
-	if channel.BaseURL == "" {
-		channel.BaseURL = baseURL
+	if channel.GetBaseURL() == "" {
+		channel.BaseURL = &baseURL
 	}
 	switch channel.Type {
 	case common.ChannelTypeOpenAI:
-		if channel.BaseURL != "" {
-			baseURL = channel.BaseURL
+		if channel.GetBaseURL() != "" {
+			baseURL = channel.GetBaseURL()
 		}
 	case common.ChannelTypeAzure:
 		return 0, errors.New("尚未实现")
 	case common.ChannelTypeCustom:
-		baseURL = channel.BaseURL
+		baseURL = channel.GetBaseURL()
 	case common.ChannelTypeCloseAI:
 		return updateChannelCloseAIBalance(channel)
 	case common.ChannelTypeOpenAISB:
```
```diff
@@ -42,10 +42,10 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openai
 	}
 	requestURL := common.ChannelBaseURLs[channel.Type]
 	if channel.Type == common.ChannelTypeAzure {
-		requestURL = fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", channel.BaseURL, request.Model)
+		requestURL = fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", channel.GetBaseURL(), request.Model)
 	} else {
-		if channel.BaseURL != "" {
-			requestURL = channel.BaseURL
+		if channel.GetBaseURL() != "" {
+			requestURL = channel.GetBaseURL()
 		}
 		requestURL += "/v1/chat/completions"
 	}
```
```diff
@@ -117,6 +117,15 @@ func init() {
 		Root:       "gpt-3.5-turbo-16k-0613",
 		Parent:     nil,
 	},
+	{
+		Id:         "gpt-3.5-turbo-instruct",
+		Object:     "model",
+		Created:    1677649963,
+		OwnedBy:    "openai",
+		Permission: permission,
+		Root:       "gpt-3.5-turbo-instruct",
+		Parent:     nil,
+	},
 	{
 		Id:         "gpt-4",
 		Object:     "model",
@@ -343,21 +352,21 @@ func init() {
 		Parent:     nil,
 	},
 	{
-		Id:         "qwen-v1",
+		Id:         "qwen-turbo",
 		Object:     "model",
 		Created:    1677649963,
 		OwnedBy:    "ali",
 		Permission: permission,
-		Root:       "qwen-v1",
+		Root:       "qwen-turbo",
 		Parent:     nil,
 	},
 	{
-		Id:         "qwen-plus-v1",
+		Id:         "qwen-plus",
 		Object:     "model",
 		Created:    1677649963,
 		OwnedBy:    "ali",
 		Permission: permission,
-		Root:       "qwen-plus-v1",
+		Root:       "qwen-plus",
 		Parent:     nil,
 	},
 	{
```
```diff
@@ -9,44 +9,53 @@ import (
 	"net/http"
 	"one-api/common"
 	"strconv"
+	"strings"
 )
 
 var stopFinishReason = "stop"
 
 // tokenEncoderMap won't grow after initialization
 var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
+var defaultTokenEncoder *tiktoken.Tiktoken
 
 func InitTokenEncoders() {
 	common.SysLog("initializing token encoders")
-	fallbackTokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
+	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
 	if err != nil {
-		common.FatalLog(fmt.Sprintf("failed to get fallback token encoder: %s", err.Error()))
+		common.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
 	}
+	defaultTokenEncoder = gpt35TokenEncoder
+	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
+	if err != nil {
+		common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
+	}
 	for model, _ := range common.ModelRatio {
-		tokenEncoder, err := tiktoken.EncodingForModel(model)
-		if err != nil {
-			common.SysError(fmt.Sprintf("using fallback encoder for model %s", model))
-			tokenEncoderMap[model] = fallbackTokenEncoder
-			continue
+		if strings.HasPrefix(model, "gpt-3.5") {
+			tokenEncoderMap[model] = gpt35TokenEncoder
+		} else if strings.HasPrefix(model, "gpt-4") {
+			tokenEncoderMap[model] = gpt4TokenEncoder
+		} else {
+			tokenEncoderMap[model] = nil
 		}
-		tokenEncoderMap[model] = tokenEncoder
 	}
 	common.SysLog("token encoders initialized")
 }
 
 func getTokenEncoder(model string) *tiktoken.Tiktoken {
-	if tokenEncoder, ok := tokenEncoderMap[model]; ok {
+	tokenEncoder, ok := tokenEncoderMap[model]
+	if ok && tokenEncoder != nil {
 		return tokenEncoder
 	}
-	tokenEncoder, err := tiktoken.EncodingForModel(model)
-	if err != nil {
-		common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
-		tokenEncoder, err = tiktoken.EncodingForModel("gpt-3.5-turbo")
-		if err != nil {
-			common.FatalLog(fmt.Sprintf("failed to get token encoder for model gpt-3.5-turbo: %s", err.Error()))
-		}
+	if ok {
+		tokenEncoder, err := tiktoken.EncodingForModel(model)
+		if err != nil {
+			common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
+			tokenEncoder = defaultTokenEncoder
+		}
+		tokenEncoderMap[model] = tokenEncoder
+		return tokenEncoder
 	}
-	tokenEncoderMap[model] = tokenEncoder
-	return tokenEncoder
+	return defaultTokenEncoder
 }
 
 func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
```
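The rewritten initializer eagerly loads only the gpt-3.5-turbo and gpt-4 encoders, pre-seeds the map with `nil` for every other known model, and resolves those lazily on first use. A standalone sketch of the same caching pattern, assuming the `pkoukk/tiktoken-go` API the project uses:

```go
package main

import (
	"fmt"

	"github.com/pkoukk/tiktoken-go"
)

// encoders caches one encoder per model; a nil entry means
// "known model, not resolved yet".
var encoders = map[string]*tiktoken.Tiktoken{}

func encoderFor(model string, fallback *tiktoken.Tiktoken) *tiktoken.Tiktoken {
	if enc, ok := encoders[model]; ok && enc != nil {
		return enc
	}
	enc, err := tiktoken.EncodingForModel(model)
	if err != nil {
		enc = fallback // unknown model: reuse the fallback encoder
	}
	encoders[model] = enc
	return enc
}

func main() {
	fallback, _ := tiktoken.EncodingForModel("gpt-3.5-turbo")
	enc := encoderFor("gpt-4", fallback)
	fmt.Println(len(enc.Encode("hello world", nil, nil))) // token count
}
```

Compared with calling `EncodingForModel` on every request, the map lookup avoids repeated encoding setup, and the nil sentinel keeps the map from growing beyond the models declared in `ModelRatio`.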
main.go (20 changes)
```diff
@@ -2,6 +2,7 @@ package main
 
 import (
 	"embed"
+	"fmt"
 	"github.com/gin-contrib/sessions"
 	"github.com/gin-contrib/sessions/cookie"
 	"github.com/gin-gonic/gin"
@@ -50,18 +51,17 @@ func main() {
 	// Initialize options
 	model.InitOptionMap()
-	if os.Getenv("SYNC_FREQUENCY") != "" {
-		frequency, err := strconv.Atoi(os.Getenv("SYNC_FREQUENCY"))
-		if err != nil {
-			common.FatalLog("failed to parse SYNC_FREQUENCY: " + err.Error())
-		}
-		common.SyncFrequency = frequency
-		go model.SyncOptions(frequency)
-		if common.RedisEnabled {
-			go model.SyncChannelCache(frequency)
-		}
+	if common.RedisEnabled {
+		// for compatibility with old versions
+		common.MemoryCacheEnabled = true
+	}
+	if common.MemoryCacheEnabled {
+		common.SysLog("memory cache enabled")
+		common.SysError(fmt.Sprintf("sync frequency: %d seconds", common.SyncFrequency))
+		model.InitChannelCache()
+	}
+	if common.MemoryCacheEnabled {
+		go model.SyncOptions(common.SyncFrequency)
+		go model.SyncChannelCache(common.SyncFrequency)
 	}
 	if os.Getenv("CHANNEL_UPDATE_FREQUENCY") != "" {
 		frequency, err := strconv.Atoi(os.Getenv("CHANNEL_UPDATE_FREQUENCY"))
```
```diff
@@ -94,7 +94,7 @@ func TokenAuth() func(c *gin.Context) {
 			abortWithMessage(c, http.StatusUnauthorized, err.Error())
 			return
 		}
-		userEnabled, err := model.IsUserEnabled(token.UserId)
+		userEnabled, err := model.CacheIsUserEnabled(token.UserId)
 		if err != nil {
 			abortWithMessage(c, http.StatusInternalServerError, err.Error())
 			return
```
```diff
@@ -82,9 +82,9 @@ func Distribute() func(c *gin.Context) {
 		c.Set("channel", channel.Type)
 		c.Set("channel_id", channel.Id)
 		c.Set("channel_name", channel.Name)
-		c.Set("model_mapping", channel.ModelMapping)
+		c.Set("model_mapping", channel.GetModelMapping())
 		c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
-		c.Set("base_url", channel.BaseURL)
+		c.Set("base_url", channel.GetBaseURL())
 		switch channel.Type {
 		case common.ChannelTypeAzure:
 			c.Set("api_version", channel.Other)
```
```diff
@@ -10,16 +10,18 @@ type Ability struct {
 	Model     string `json:"model" gorm:"primaryKey;autoIncrement:false"`
 	ChannelId int    `json:"channel_id" gorm:"primaryKey;autoIncrement:false;index"`
 	Enabled   bool   `json:"enabled"`
-	Priority  int64  `json:"priority" gorm:"bigint;default:0"`
+	Priority  *int64 `json:"priority" gorm:"bigint;default:0;index"`
 }
 
 func GetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
 	ability := Ability{}
 	var err error = nil
+	maxPrioritySubQuery := DB.Model(&Ability{}).Select("MAX(priority)").Where("`group` = ? and model = ? and enabled = 1", group, model)
+	channelQuery := DB.Where("`group` = ? and model = ? and enabled = 1 and priority = (?)", group, model, maxPrioritySubQuery)
 	if common.UsingSQLite {
-		err = DB.Where("`group` = ? and model = ? and enabled = 1", group, model).Order("CASE WHEN priority <> 0 THEN priority ELSE RANDOM() END DESC ").Limit(1).First(&ability).Error
+		err = channelQuery.Order("RANDOM()").First(&ability).Error
 	} else {
-		err = DB.Where("`group` = ? and model = ? and enabled = 1", group, model).Order("CASE WHEN priority <> 0 THEN priority ELSE RAND() END DESC").Limit(1).First(&ability).Error
+		err = channelQuery.Order("RAND()").First(&ability).Error
 	}
 	if err != nil {
 		return nil, err
```
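The subquery restricts candidates to the highest-priority abilities before randomizing. For illustration, a hedged sketch of the raw SQL equivalent (MySQL dialect; the SQLite branch uses `RANDOM()` instead of `RAND()`; the function name and raw-SQL form here are for explanation only):

```go
package model

import "gorm.io/gorm"

// randomTopPriorityAbility is an illustrative equivalent of the query that
// GetRandomSatisfiedChannel builds via the MAX(priority) subquery.
func randomTopPriorityAbility(db *gorm.DB, group, model string) (Ability, error) {
	var ability Ability
	err := db.Raw(
		"SELECT * FROM abilities"+
			" WHERE `group` = ? AND model = ? AND enabled = 1"+
			" AND priority = (SELECT MAX(priority) FROM abilities"+
			" WHERE `group` = ? AND model = ? AND enabled = 1)"+
			" ORDER BY RAND() LIMIT 1",
		group, model, group, model,
	).Scan(&ability).Error
	return ability, err
}
```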
```diff
@@ -165,7 +165,7 @@ func InitChannelCache() {
 	for group, model2channels := range newGroup2model2channels {
 		for model, channels := range model2channels {
 			sort.Slice(channels, func(i, j int) bool {
-				return channels[i].Priority > channels[j].Priority
+				return channels[i].GetPriority() > channels[j].GetPriority()
 			})
 			newGroup2model2channels[group][model] = channels
 		}
@@ -186,7 +186,7 @@ func SyncChannelCache(frequency int) {
 }
 
 func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
-	if !common.RedisEnabled {
+	if !common.MemoryCacheEnabled {
 		return GetRandomSatisfiedChannel(group, model)
 	}
 	channelSyncLock.RLock()
@@ -195,11 +195,17 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
 	if len(channels) == 0 {
 		return nil, errors.New("channel not found")
 	}
+	endIdx := len(channels)
+	// choose by priority
 	firstChannel := channels[0]
-	if firstChannel.Priority > 0 {
-		return firstChannel, nil
+	if firstChannel.GetPriority() > 0 {
+		for i := range channels {
+			if channels[i].GetPriority() != firstChannel.GetPriority() {
+				endIdx = i
+				break
+			}
+		}
 	}
-	idx := rand.Intn(len(channels))
+	idx := rand.Intn(endIdx)
 	return channels[idx], nil
 }
```
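In the cached path the channels for a (group, model) pair are already sorted by priority in descending order, so the new code narrows the random pick to the leading run of equal priorities instead of always returning the first channel. A self-contained sketch of that windowing logic:

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickTopPriority mirrors the new CacheGetRandomSatisfiedChannel logic:
// priorities are sorted descending, and a random pick is made only within
// the leading run of equal, positive priorities.
func pickTopPriority(priorities []int64) int {
	endIdx := len(priorities)
	if priorities[0] > 0 {
		for i, p := range priorities {
			if p != priorities[0] {
				endIdx = i
				break
			}
		}
	}
	return rand.Intn(endIdx)
}

func main() {
	// Three channels at priority 10, then lower-priority fallbacks.
	fmt.Println(pickTopPriority([]int64{10, 10, 10, 5, 0})) // always 0..2
}
```

With priorities `{10, 10, 10, 5, 0}` only the first three indexes can be returned, so lower-priority channels act purely as fallbacks.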
```diff
@@ -11,19 +11,19 @@ type Channel struct {
 	Key                string  `json:"key" gorm:"not null;index"`
 	Status             int     `json:"status" gorm:"default:1"`
 	Name               string  `json:"name" gorm:"index"`
-	Weight             int     `json:"weight"`
+	Weight             *uint   `json:"weight" gorm:"default:0"`
 	CreatedTime        int64   `json:"created_time" gorm:"bigint"`
 	TestTime           int64   `json:"test_time" gorm:"bigint"`
 	ResponseTime       int     `json:"response_time"` // in milliseconds
-	BaseURL            string  `json:"base_url" gorm:"column:base_url"`
+	BaseURL            *string `json:"base_url" gorm:"column:base_url;default:''"`
 	Other              string  `json:"other"`
 	Balance            float64 `json:"balance"` // in USD
 	BalanceUpdatedTime int64   `json:"balance_updated_time" gorm:"bigint"`
 	Models             string  `json:"models"`
 	Group              string  `json:"group" gorm:"type:varchar(32);default:'default'"`
 	UsedQuota          int64   `json:"used_quota" gorm:"bigint;default:0"`
-	ModelMapping       string  `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
-	Priority           int64   `json:"priority" gorm:"bigint;default:0"`
+	ModelMapping       *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
+	Priority           *int64  `json:"priority" gorm:"bigint;default:0"`
 }
 
 func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
@@ -79,6 +79,27 @@ func BatchInsertChannels(channels []Channel) error {
 	return nil
 }
 
+func (channel *Channel) GetPriority() int64 {
+	if channel.Priority == nil {
+		return 0
+	}
+	return *channel.Priority
+}
+
+func (channel *Channel) GetBaseURL() string {
+	if channel.BaseURL == nil {
+		return ""
+	}
+	return *channel.BaseURL
+}
+
+func (channel *Channel) GetModelMapping() string {
+	if channel.ModelMapping == nil {
+		return ""
+	}
+	return *channel.ModelMapping
+}
+
 func (channel *Channel) Insert() error {
 	var err error
 	err = DB.Create(channel).Error
```
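Switching `Weight`, `BaseURL`, `ModelMapping`, and `Priority` to pointer types lets GORM distinguish a NULL column from a genuine zero value on old rows, and the new getters centralize the nil checks so callers never dereference a nil pointer. A minimal sketch of the pattern:

```go
package main

import "fmt"

type Channel struct {
	Priority *int64 // nil when the row predates the column or was never set
}

func (c *Channel) GetPriority() int64 {
	if c.Priority == nil {
		return 0 // treat a missing priority as the default
	}
	return *c.Priority
}

func main() {
	var legacy Channel // e.g. a row created before priority existed
	fmt.Println(legacy.GetPriority()) // 0, no nil-pointer dereference
	p := int64(7)
	fmt.Println((&Channel{Priority: &p}).GetPriority()) // 7
}
```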
model/log.go (11 changes)
```diff
@@ -9,7 +9,7 @@ import (
 
 type Log struct {
 	Id        int    `json:"id"`
-	UserId    int    `json:"user_id"`
+	UserId    int    `json:"user_id" gorm:"index"`
 	CreatedAt int64  `json:"created_at" gorm:"bigint;index"`
 	Type      int    `json:"type" gorm:"index"`
 	Content   string `json:"content"`
@@ -19,7 +19,7 @@ type Log struct {
 	Quota            int `json:"quota" gorm:"default:0"`
 	PromptTokens     int `json:"prompt_tokens" gorm:"default:0"`
 	CompletionTokens int `json:"completion_tokens" gorm:"default:0"`
-	Channel          int `json:"channel" gorm:"default:0"`
+	ChannelId        int `json:"channel" gorm:"index"`
 }
 
 const (
@@ -47,7 +47,6 @@ func RecordLog(userId int, logType int, content string) {
 	}
 }
 
-
 func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string) {
 	common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
 	if !common.LogConsumeEnabled {
@@ -64,7 +63,7 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
 		TokenName: tokenName,
 		ModelName: modelName,
 		Quota:     quota,
-		Channel:   channelId,
+		ChannelId: channelId,
 	}
 	err := DB.Create(log).Error
 	if err != nil {
@@ -135,7 +134,7 @@ func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
 }
 
 func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int) {
-	tx := DB.Table("logs").Select("sum(quota)")
+	tx := DB.Table("logs").Select("ifnull(sum(quota),0)")
 	if username != "" {
 		tx = tx.Where("username = ?", username)
 	}
@@ -159,7 +158,7 @@ func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelNa
 }
 
 func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) {
-	tx := DB.Table("logs").Select("sum(prompt_tokens) + sum(completion_tokens)")
+	tx := DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)")
 	if username != "" {
 		tx = tx.Where("username = ?", username)
 	}
```
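Wrapping the aggregates in `ifnull(...)` matters because SQL `SUM()` over zero rows yields NULL, which cannot be scanned into a plain `int`. A hedged sketch of the failure mode the change avoids (the helper name and filter are illustrative):

```go
package model

import "gorm.io/gorm"

// sumQuota illustrates why the Select was changed: without ifnull, a filter
// that matches no rows makes SUM(quota) return NULL, and scanning NULL into
// an int fails; ifnull(sum(quota),0) always yields a number.
func sumQuota(db *gorm.DB, tokenName string) (quota int, err error) {
	row := db.Table("logs").
		Select("ifnull(sum(quota),0)").
		Where("token_name = ?", tokenName).
		Row()
	err = row.Scan(&quota)
	return quota, err
}
```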
```diff
@@ -2,8 +2,8 @@ import React, { useContext, useEffect, useState } from 'react';
 import { Button, Divider, Form, Grid, Header, Image, Message, Modal, Segment } from 'semantic-ui-react';
 import { Link, useNavigate, useSearchParams } from 'react-router-dom';
 import { UserContext } from '../context/User';
-import { API, getLogo, showError, showSuccess } from '../helpers';
-import { getOAuthState, onGitHubOAuthClicked } from './utils';
+import { API, getLogo, showError, showSuccess, showWarning } from '../helpers';
+import { onGitHubOAuthClicked } from './utils';
 
 const LoginForm = () => {
   const [inputs, setInputs] = useState({
@@ -68,8 +68,14 @@ const LoginForm = () => {
     if (success) {
       userDispatch({ type: 'login', payload: data });
       localStorage.setItem('user', JSON.stringify(data));
-      navigate('/');
-      showSuccess('登录成功!');
+      if (username === 'root' && password === '123456') {
+        navigate('/user/edit');
+        showSuccess('登录成功!');
+        showWarning('请立刻修改默认密码!');
+      } else {
+        navigate('/token');
+        showSuccess('登录成功!');
+      }
     } else {
       showError(message);
     }
@@ -126,7 +132,7 @@ const LoginForm = () => {
             circular
             color='black'
             icon='github'
-            onClick={()=>onGitHubOAuthClicked(status.github_client_id)}
+            onClick={() => onGitHubOAuthClicked(status.github_client_id)}
           />
         ) : (
           <></>
```
```diff
@@ -67,7 +67,7 @@ const EditChannel = () => {
         localModels = ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'Embedding-V1'];
         break;
       case 17:
-        localModels = ['qwen-v1', 'qwen-plus-v1', 'text-embedding-v1'];
+        localModels = ['qwen-turbo', 'qwen-plus', 'text-embedding-v1'];
         break;
       case 16:
         localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
@@ -174,7 +174,7 @@ const EditChannel = () => {
       return;
     }
     let localInputs = inputs;
-    if (localInputs.base_url.endsWith('/')) {
+    if (localInputs.base_url && localInputs.base_url.endsWith('/')) {
       localInputs.base_url = localInputs.base_url.slice(0, localInputs.base_url.length - 1);
     }
     if (localInputs.type === 3 && localInputs.other === '') {
@@ -183,9 +183,6 @@ const EditChannel = () => {
     if (localInputs.type === 18 && localInputs.other === '') {
       localInputs.other = 'v2.1';
     }
-    if (localInputs.model_mapping === '') {
-      localInputs.model_mapping = '{}';
-    }
     let res;
     localInputs.models = localInputs.models.join(',');
     localInputs.group = localInputs.groups.join(',');
```
```diff
@@ -102,7 +102,7 @@ const EditUser = () => {
             label='密码'
             name='password'
             type={'password'}
-            placeholder={'请输入新的密码'}
+            placeholder={'请输入新的密码,最短 8 位'}
             onChange={handleInputChange}
             value={password}
             autoComplete='new-password'
```