Mirror of https://github.com/songquanpeng/one-api.git, synced 2025-11-10 02:23:43 +08:00

Compare commits

5 commits: v0.4.5-alp ... v0.4.5-alp
| Author | SHA1 | Date |
|---|---|---|
|  | 567916bd80 |  |
|  | 1f3b3ca7ae |  |
|  | 70cffbc258 |  |
|  | 6d961064d2 |  |
|  | ba54c71948 |  |

README.md

@@ -151,9 +151,10 @@ sudo service nginx restart

### Multi-machine deployment

1. Set the same `SESSION_SECRET` value on all servers.
-2. `SQL_DSN` must be set. Use a MySQL database instead of SQLite, and configure primary/replica database synchronization yourself.
+2. `SQL_DSN` must be set. Use a MySQL database instead of SQLite; all servers connect to the same database.
3. All slave servers must set `SYNC_FREQUENCY` so that they periodically sync configuration from the database.
4. Slave servers may optionally set `FRONTEND_BASE_URL` to redirect page requests to the master server.
5. It is recommended to install Redis on each server and set `REDIS_CONN_STRING`; as long as the cache has not expired, the database is never touched, which reduces latency.

See [here](#环境变量) for details on how to use the environment variables.
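As a rough illustration of the deployment notes above (an editorial sketch, not part of this diff; the image name, port, DSN, and every other value below are placeholder assumptions), a slave node could be started along these lines:

```bash
# Hypothetical slave-node startup; every value below is a placeholder.
# SESSION_SECRET    - must be identical on every server.
# SQL_DSN           - every server points at the same MySQL database.
# SYNC_FREQUENCY    - set on slaves; how often (in seconds) to re-sync config from the database.
# FRONTEND_BASE_URL - optional; redirects page requests to the master server.
# REDIS_CONN_STRING - optional; per-node cache so the database is skipped while the cache is valid.
docker run --name one-api-slave -d -p 3000:3000 \
  -e SESSION_SECRET=your_shared_session_secret \
  -e SQL_DSN='user:password@tcp(db-host:3306)/oneapi' \
  -e SYNC_FREQUENCY=60 \
  -e FRONTEND_BASE_URL=https://master.example.com \
  -e REDIS_CONN_STRING=redis://localhost:6379 \
  justsong/one-api
```

Leaving `SYNC_FREQUENCY` unset is what marks a node as the master; compare the `IsMasterNode` change further down in this diff.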
@@ -170,7 +171,7 @@ sudo service nginx restart

Project home page: https://github.com/Yidadaa/ChatGPT-Next-Web

```bash
-docker run --name chat-next-web -d -p 3001:3000 -e BASE_URL=https://openai.justsong.cn yidadaa/chatgpt-next-web
+docker run --name chat-next-web -d -p 3001:3000 yidadaa/chatgpt-next-web
```

Remember to modify the port number and `BASE_URL`.

@@ -267,7 +268,7 @@ https://openai.justsong.cn

1. What is quota? How is it calculated? Does One API's quota calculation have a problem?
    + Quota = number of tokens * ratio
    + The ratio includes the group ratio as well as the completion ratio.
-    + In non-stream mode, the official API returns the total tokens consumed, but note that the prompt and the completion consume different amounts of quota.
+    + In non-stream mode, the official API returns the total tokens consumed, but note that the prompt and the completion have different consumption ratios.
2. Why do I get an insufficient-quota error even though my account has enough quota?
    + Check whether the quota of your token is sufficient; token quota and account quota are tracked separately.
    + The token quota only serves as a cap on how much a single token may use, and users can set it freely.
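Worked example for items 1 and 2 above (hypothetical numbers, for illustration only): a non-stream request that reports 1,000 total tokens, under a model ratio of 15 and a group ratio of 1, is billed roughly 1000 × 15 × 1 = 15,000 quota, with the exact figure also depending on how prompt and completion tokens are weighted. Even if the account still holds far more than that, the request is rejected when the token being used was created with a smaller cap that is already exhausted.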
@@ -277,6 +278,9 @@ https://openai.justsong.cn

4. Channel test reports the error `invalid character '<' looking for beginning of value`
    + This means the response is not valid JSON but an HTML page.
    + Most likely the IP of your deployment, or the proxy node it goes through, has been blocked by CloudFlare.
5. ChatGPT Next Web reports the error `Failed to fetch`
    + Do not set `BASE_URL` when deploying it.
    + Check that the API endpoint address and the API Key are filled in correctly.

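A quick way to reproduce the symptom behind item 4 above (an editorial sketch, not from the README; the URL and key are placeholders) is to query the channel's upstream base URL directly and check whether HTML comes back instead of JSON:

```bash
# Placeholder URL and key; substitute the base URL configured for the failing channel.
# A body that starts with '<' is an HTML error page, which means the request is being
# intercepted (e.g. by CloudFlare) before it reaches the OpenAI-compatible API.
curl -sS -D - https://your-channel-base-url.example.com/v1/models \
  -H "Authorization: Bearer sk-your-key" | head -n 20
```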
## Note

This is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) as well as **applicable laws and regulations**; it must not be used for illegal purposes.

@@ -1,6 +1,7 @@
package common

import (
+    "os"
    "sync"
    "time"

@@ -67,6 +68,8 @@ var PreConsumedQuota = 500

var RootUserEmail = ""

+var IsMasterNode = os.Getenv("SYNC_FREQUENCY") == ""
+
const (
    RoleGuestUser  = 0
    RoleCommonUser = 1
@@ -16,6 +16,7 @@ import (
func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    channelType := c.GetInt("channel")
    tokenId := c.GetInt("token_id")
+    userId := c.GetInt("id")
    consumeQuota := c.GetBool("consume_quota")
    group := c.GetString("group")
    var textRequest GeneralOpenAIRequest
@@ -73,7 +74,16 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    groupRatio := common.GetGroupRatio(group)
    ratio := modelRatio * groupRatio
    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
-    if consumeQuota {
+    userQuota, err := model.CacheGetUserQuota(userId)
+    if err != nil {
+        return errorWrapper(err, "get_user_quota_failed", http.StatusOK)
+    }
+    if userQuota > 10*preConsumedQuota {
+        // in this case, we do not pre-consume quota
+        // because the user has enough quota
+        preConsumedQuota = 0
+    }
+    if consumeQuota && preConsumedQuota > 0 {
        err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
        if err != nil {
            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusOK)
@@ -133,7 +143,6 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
            common.SysError("Error consuming token remain quota: " + err.Error())
        }
        tokenName := c.GetString("token_name")
-        userId := c.GetInt("id")
        model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("通过令牌「%s」使用模型 %s 消耗 %s(模型倍率 %.2f,分组倍率 %.2f)", tokenName, textRequest.Model, common.LogQuota(quota), modelRatio, groupRatio))
        model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
        channelId := c.GetInt("channel_id")

@@ -100,7 +100,7 @@ func TokenAuth() func(c *gin.Context) {
        c.Abort()
        return
    }
-    if !model.IsUserEnabled(token.UserId) {
+    if !model.CacheIsUserEnabled(token.UserId) {
        c.JSON(http.StatusOK, gin.H{
            "error": gin.H{
                "message": "用户已被封禁",

@@ -2,15 +2,21 @@ package model

import (
    "encoding/json"
+    "errors"
    "fmt"
+    "math/rand"
    "one-api/common"
+    "strconv"
+    "strings"
    "sync"
    "time"
)

const (
-    TokenCacheSeconds        = 60 * 60
-    UserId2GroupCacheSeconds = 60 * 60
+    TokenCacheSeconds         = 60 * 60
+    UserId2GroupCacheSeconds  = 60 * 60
+    UserId2QuotaCacheSeconds  = 10 * 60
+    UserId2StatusCacheSeconds = 60 * 60
)

func CacheGetTokenByKey(key string) (*Token, error) {

@@ -57,18 +63,54 @@ func CacheGetUserGroup(id int) (group string, err error) {
    return group, err
}

-var channelId2channel map[int]*Channel
-var channelSyncLock sync.RWMutex
+func CacheGetUserQuota(id int) (quota int, err error) {
+    if !common.RedisEnabled {
+        return GetUserQuota(id)
+    }
+    quotaString, err := common.RedisGet(fmt.Sprintf("user_quota:%d", id))
+    if err != nil {
+        quota, err = GetUserQuota(id)
+        if err != nil {
+            return 0, err
+        }
+        err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), UserId2QuotaCacheSeconds*time.Second)
+        if err != nil {
+            common.SysError("Redis set user quota error: " + err.Error())
+        }
+        return quota, err
+    }
+    quota, err = strconv.Atoi(quotaString)
+    return quota, err
+}
+
+func CacheIsUserEnabled(userId int) bool {
+    if !common.RedisEnabled {
+        return IsUserEnabled(userId)
+    }
+    enabled, err := common.RedisGet(fmt.Sprintf("user_enabled:%d", userId))
+    if err != nil {
+        status := common.UserStatusDisabled
+        if IsUserEnabled(userId) {
+            status = common.UserStatusEnabled
+        }
+        enabled = fmt.Sprintf("%d", status)
+        err = common.RedisSet(fmt.Sprintf("user_enabled:%d", userId), enabled, UserId2StatusCacheSeconds*time.Second)
+        if err != nil {
+            common.SysError("Redis set user enabled error: " + err.Error())
+        }
+    }
+    return enabled == "1"
+}
+
+var group2model2channels map[string]map[string][]*Channel
+var channelSyncLock sync.RWMutex

func InitChannelCache() {
-    channelSyncLock.Lock()
-    defer channelSyncLock.Unlock()
-    channelId2channel = make(map[int]*Channel)
+    newChannelId2channel := make(map[int]*Channel)
    var channels []*Channel
    DB.Find(&channels)
    for _, channel := range channels {
-        channelId2channel[channel.Id] = channel
+        newChannelId2channel[channel.Id] = channel
    }
    var abilities []*Ability
    DB.Find(&abilities)

@@ -76,11 +118,26 @@ func InitChannelCache() {
    for _, ability := range abilities {
        groups[ability.Group] = true
    }
-    group2model2channels = make(map[string]map[string][]*Channel)
+    newGroup2model2channels := make(map[string]map[string][]*Channel)
    for group := range groups {
-        group2model2channels[group] = make(map[string][]*Channel)
-        // TODO: implement this
+        newGroup2model2channels[group] = make(map[string][]*Channel)
    }
+    for _, channel := range channels {
+        groups := strings.Split(channel.Group, ",")
+        for _, group := range groups {
+            models := strings.Split(channel.Models, ",")
+            for _, model := range models {
+                if _, ok := newGroup2model2channels[group][model]; !ok {
+                    newGroup2model2channels[group][model] = make([]*Channel, 0)
+                }
+                newGroup2model2channels[group][model] = append(newGroup2model2channels[group][model], channel)
+            }
+        }
+    }
+    channelSyncLock.Lock()
+    group2model2channels = newGroup2model2channels
+    channelSyncLock.Unlock()
    common.SysLog("Channels synced from database")
}

func SyncChannelCache(frequency int) {
@@ -95,7 +152,12 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
    if !common.RedisEnabled {
        return GetRandomSatisfiedChannel(group, model)
    }
-    return GetRandomSatisfiedChannel(group, model)
-    // TODO: implement this
-    return nil, nil
+    channelSyncLock.RLock()
+    defer channelSyncLock.RUnlock()
+    channels := group2model2channels[group][model]
+    if len(channels) == 0 {
+        return nil, errors.New("channel not found")
+    }
+    idx := rand.Intn(len(channels))
+    return channels[idx], nil
}

@@ -55,6 +55,9 @@ func InitDB() (err error) {
    }
    if err == nil {
        DB = db
+        if !common.IsMasterNode {
+            return nil
+        }
        err := db.AutoMigrate(&Channel{})
        if err != nil {
            return err

@@ -238,9 +238,17 @@ const ChannelsTable = () => {
    if (channels.length === 0) return;
    setLoading(true);
    let sortedChannels = [...channels];
-    sortedChannels.sort((a, b) => {
-      return ('' + a[key]).localeCompare(b[key]);
-    });
+    if (typeof sortedChannels[0][key] === 'string'){
+      sortedChannels.sort((a, b) => {
+        return ('' + a[key]).localeCompare(b[key]);
+      });
+    } else {
+      sortedChannels.sort((a, b) => {
+        if (a[key] === b[key]) return 0;
+        if (a[key] > b[key]) return -1;
+        if (a[key] < b[key]) return 1;
+      });
+    }
    if (sortedChannels[0].id === channels[0].id) {
      sortedChannels.reverse();
    }