Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-23 18:03:41 +08:00)

Compare commits: v0.4.5 ... v0.4.5-alp (11 commits)
Commits:

14b85318a6
b179c2f208
3d76a974d1
4250064296
868d9a87d2
33846ce4f6
e5ac80c15d
9291b5fb20
d97f1df3c9
f0434c810c
f6fe34676f
README.md (21)

@@ -52,10 +52,10 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
1. 支持多种 API 访问渠道,欢迎 PR 或提 issue 添加更多渠道:
    + [x] OpenAI 官方通道(支持配置代理)
    + [x] **Azure OpenAI API**
    + [x] [OpenAI-SB](https://openai-sb.com)
    + [x] [API2D](https://api2d.com/r/197971)
    + [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf)
    + [x] [AI Proxy](https://aiproxy.io/?i=OneAPI) (邀请码:`OneAPI`)
    + [x] [OpenAI-SB](https://openai-sb.com)
    + [x] [API2GPT](http://console.api2gpt.com/m/00002S)
    + [x] [CloseAI](https://console.closeai-asia.com/r/2412)
    + [x] [AI.LS](https://ai.ls)

@@ -70,17 +70,18 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
8. 支持**用户分组**以及**渠道分组**,支持为不同分组设置不同的倍率。
9. 支持渠道**设置模型列表**。
10. 支持**查看额度明细**。
11. 支持发布公告,设置充值链接,设置新用户初始额度。
12. 支持丰富的**自定义**设置,
11. 支持**用户邀请奖励**。
12. 支持发布公告,设置充值链接,设置新用户初始额度。
13. 支持丰富的**自定义**设置,
    1. 支持自定义系统名称,logo 以及页脚。
    2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
13. 支持通过系统访问令牌访问管理 API。
14. 支持 Cloudflare Turnstile 用户校验。
15. 支持用户管理,支持**多种用户登录注册方式**:
14. 支持通过系统访问令牌访问管理 API。
15. 支持 Cloudflare Turnstile 用户校验。
16. 支持用户管理,支持**多种用户登录注册方式**:
    + 邮箱登录注册以及通过邮箱进行密码重置。
    + [GitHub 开放授权](https://github.com/settings/applications/new)。
    + 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
16. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
17. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。

## 部署
### 基于 Docker 进行部署

@@ -157,9 +158,9 @@ sudo service nginx restart

### 宝塔部署教程

详见[#175](https://github.com/songquanpeng/one-api/issues/175)。
详见 [#175](https://github.com/songquanpeng/one-api/issues/175)。

如果部署后访问出现空白页面,详见[#97](https://github.com/songquanpeng/one-api/issues/97)。
如果部署后访问出现空白页面,详见 [#97](https://github.com/songquanpeng/one-api/issues/97)。

### 部署第三方服务配合 One API 使用
> 欢迎 PR 添加更多示例。

@@ -277,7 +278,7 @@ https://openai.justsong.cn
+ 大概率是你的部署站的 IP 或代理的节点被 CloudFlare 封禁了。

## 注意
本项目为开源项目,请在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及法律法规的情况下使用,不得用于非法用途。
本项目为开源项目,请在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。

本项目使用 MIT 协议进行开源,请以某种方式保留 One API 的版权信息。
@@ -15,6 +15,8 @@ var Footer = ""
var Logo = ""
var TopUpLink = ""
var ChatLink = ""
var QuotaPerUnit = 500 * 1000.0 // $0.002 / 1K tokens
var DisplayInCurrencyEnabled = false

var UsingSQLite = false
@@ -42,3 +42,11 @@ func FatalLog(v ...any) {
    _, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[FATAL] %v | %v \n", t.Format("2006/01/02 - 15:04:05"), v)
    os.Exit(1)
}

func LogQuota(quota int) string {
    if DisplayInCurrencyEnabled {
        return fmt.Sprintf("$%.6f 额度", float64(quota)/QuotaPerUnit)
    } else {
        return fmt.Sprintf("%d 点额度", quota)
    }
}
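Taken together, the two new options above define how raw quota points map to money: `QuotaPerUnit = 500 * 1000.0` means 500,000 points correspond to one unit of currency (hence the `$0.002 / 1K tokens` comment — 1,000 points match the gpt-3.5 price for 1K tokens), and `LogQuota` picks the representation based on `DisplayInCurrencyEnabled`. A standalone sketch of that conversion (illustrative only, not part of the repository):

```go
package main

import "fmt"

const quotaPerUnit = 500 * 1000.0 // 500,000 points per unit of currency

// logQuota mirrors common.LogQuota from the diff above.
func logQuota(quota int, displayInCurrency bool) string {
	if displayInCurrency {
		return fmt.Sprintf("$%.6f 额度", float64(quota)/quotaPerUnit)
	}
	return fmt.Sprintf("%d 点额度", quota)
}

func main() {
	fmt.Println(logQuota(1234, false)) // 1234 点额度
	fmt.Println(logQuota(1234, true))  // $0.002468 额度
}
```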
@@ -37,3 +37,18 @@ func ParseRedisOption() *redis.Options {
    }
    return opt
}

func RedisSet(key string, value string, expiration time.Duration) error {
    ctx := context.Background()
    return RDB.Set(ctx, key, value, expiration).Err()
}

func RedisGet(key string) (string, error) {
    ctx := context.Background()
    return RDB.Get(ctx, key).Result()
}

func RedisDel(key string) error {
    ctx := context.Background()
    return RDB.Del(ctx, key).Err()
}
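The three helpers above expose a small set/get/delete surface over the shared `RDB` client, with the TTL supplied by the caller; `model/cache.go` further down uses exactly this pattern to cache tokens and user groups. A minimal usage fragment (assumes the one-api module context with Redis enabled and `RDB` initialised; the key and value are made up for illustration):

```go
// Cache a marshalled object for one hour, then read it back.
if err := common.RedisSet("token:example-key", `{"id":1}`, 60*60*time.Second); err != nil {
	common.SysError("Redis set failed: " + err.Error())
}
cached, err := common.RedisGet("token:example-key")
if err != nil {
	// Treat this as a cache miss and fall back to the database,
	// which is what model/cache.go does below.
}
_ = cached
```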
@@ -2,6 +2,7 @@ package controller

import (
    "github.com/gin-gonic/gin"
    "one-api/common"
    "one-api/model"
)

@@ -18,23 +19,38 @@ func GetSubscription(c *gin.Context) {
        })
        return
    }
    amount := float64(quota)
    if common.DisplayInCurrencyEnabled {
        amount /= common.QuotaPerUnit
    }
    subscription := OpenAISubscriptionResponse{
        Object: "billing_subscription",
        HasPaymentMethod: true,
        SoftLimitUSD: float64(quota),
        HardLimitUSD: float64(quota),
        SystemHardLimitUSD: float64(quota),
        SoftLimitUSD: amount,
        HardLimitUSD: amount,
        SystemHardLimitUSD: amount,
    }
    c.JSON(200, subscription)
    return
}

func GetUsage(c *gin.Context) {
    //userId := c.GetInt("id")
    // TODO: get usage from database
    userId := c.GetInt("id")
    quota, err := model.GetUserUsedQuota(userId)
    if err != nil {
        openAIError := OpenAIError{
            Message: err.Error(),
            Type: "one_api_error",
        }
        c.JSON(200, gin.H{
            "error": openAIError,
        })
        return
    }
    amount := float64(quota)
    usage := OpenAIUsageResponse{
        Object: "list",
        TotalUsage: 0,
        TotalUsage: amount,
    }
    c.JSON(200, usage)
    return
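GetSubscription and GetUsage back the OpenAI-style dashboard billing responses that many client tools query. With the change above, the reported soft/hard limits are the quota value fetched earlier in the handler, either as raw points or, when `DisplayInCurrencyEnabled` is on, divided by `QuotaPerUnit` — for example, 1,000,000 points with the default `QuotaPerUnit` of 500,000 is reported as a limit of 2. GetUsage now also returns the user's actual used quota via the new `GetUserUsedQuota` instead of a hard-coded 0.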
@@ -14,21 +14,23 @@ func GetStatus(c *gin.Context) {
        "success": true,
        "message": "",
        "data": gin.H{
            "version": common.Version,
            "start_time": common.StartTime,
            "email_verification": common.EmailVerificationEnabled,
            "github_oauth": common.GitHubOAuthEnabled,
            "github_client_id": common.GitHubClientId,
            "system_name": common.SystemName,
            "logo": common.Logo,
            "footer_html": common.Footer,
            "wechat_qrcode": common.WeChatAccountQRCodeImageURL,
            "wechat_login": common.WeChatAuthEnabled,
            "server_address": common.ServerAddress,
            "turnstile_check": common.TurnstileCheckEnabled,
            "turnstile_site_key": common.TurnstileSiteKey,
            "top_up_link": common.TopUpLink,
            "chat_link": common.ChatLink,
            "version": common.Version,
            "start_time": common.StartTime,
            "email_verification": common.EmailVerificationEnabled,
            "github_oauth": common.GitHubOAuthEnabled,
            "github_client_id": common.GitHubClientId,
            "system_name": common.SystemName,
            "logo": common.Logo,
            "footer_html": common.Footer,
            "wechat_qrcode": common.WeChatAccountQRCodeImageURL,
            "wechat_login": common.WeChatAuthEnabled,
            "server_address": common.ServerAddress,
            "turnstile_check": common.TurnstileCheckEnabled,
            "turnstile_site_key": common.TurnstileSiteKey,
            "top_up_link": common.TopUpLink,
            "chat_link": common.ChatLink,
            "quota_per_unit": common.QuotaPerUnit,
            "display_in_currency": common.DisplayInCurrencyEnabled,
        },
    })
    return
controller/relay-image.go (34, new file)

@@ -0,0 +1,34 @@
package controller

import (
    "github.com/gin-gonic/gin"
    "io"
    "net/http"
)

func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    // TODO: this part is not finished
    req, err := http.NewRequest(c.Request.Method, c.Request.RequestURI, c.Request.Body)
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return errorWrapper(err, "do_request_failed", http.StatusOK)
    }
    err = req.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_request_body_failed", http.StatusOK)
    }
    for k, v := range resp.Header {
        c.Writer.Header().Set(k, v[0])
    }
    c.Writer.WriteHeader(resp.StatusCode)
    _, err = io.Copy(c.Writer, resp.Body)
    if err != nil {
        return errorWrapper(err, "copy_response_body_failed", http.StatusOK)
    }
    err = resp.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_response_body_failed", http.StatusOK)
    }
    return nil
}
controller/relay-text.go (266, new file)

@@ -0,0 +1,266 @@
package controller

import (
    "bufio"
    "bytes"
    "encoding/json"
    "fmt"
    "github.com/gin-gonic/gin"
    "io"
    "net/http"
    "one-api/common"
    "one-api/model"
    "strings"
)

func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    channelType := c.GetInt("channel")
    tokenId := c.GetInt("token_id")
    consumeQuota := c.GetBool("consume_quota")
    group := c.GetString("group")
    var textRequest GeneralOpenAIRequest
    if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM {
        err := common.UnmarshalBodyReusable(c, &textRequest)
        if err != nil {
            return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
        }
    }
    if relayMode == RelayModeModeration && textRequest.Model == "" {
        textRequest.Model = "text-moderation-latest"
    }
    baseURL := common.ChannelBaseURLs[channelType]
    requestURL := c.Request.URL.String()
    if channelType == common.ChannelTypeCustom {
        baseURL = c.GetString("base_url")
    } else if channelType == common.ChannelTypeOpenAI {
        if c.GetString("base_url") != "" {
            baseURL = c.GetString("base_url")
        }
    }
    fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
    if channelType == common.ChannelTypeAzure {
        // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
        query := c.Request.URL.Query()
        apiVersion := query.Get("api-version")
        if apiVersion == "" {
            apiVersion = c.GetString("api_version")
        }
        requestURL := strings.Split(requestURL, "?")[0]
        requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
        baseURL = c.GetString("base_url")
        task := strings.TrimPrefix(requestURL, "/v1/")
        model_ := textRequest.Model
        model_ = strings.Replace(model_, ".", "", -1)
        // https://github.com/songquanpeng/one-api/issues/67
        model_ = strings.TrimSuffix(model_, "-0301")
        model_ = strings.TrimSuffix(model_, "-0314")
        model_ = strings.TrimSuffix(model_, "-0613")
        fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
    } else if channelType == common.ChannelTypePaLM {
        err := relayPaLM(textRequest, c)
        return err
    }
    var promptTokens int
    switch relayMode {
    case RelayModeChatCompletions:
        promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
    case RelayModeCompletions:
        promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
    case RelayModeModeration:
        promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
    }
    preConsumedTokens := common.PreConsumedQuota
    if textRequest.MaxTokens != 0 {
        preConsumedTokens = promptTokens + textRequest.MaxTokens
    }
    modelRatio := common.GetModelRatio(textRequest.Model)
    groupRatio := common.GetGroupRatio(group)
    ratio := modelRatio * groupRatio
    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
    if consumeQuota {
        err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
        if err != nil {
            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusOK)
        }
    }
    req, err := http.NewRequest(c.Request.Method, fullRequestURL, c.Request.Body)
    if err != nil {
        return errorWrapper(err, "new_request_failed", http.StatusOK)
    }
    if channelType == common.ChannelTypeAzure {
        key := c.Request.Header.Get("Authorization")
        key = strings.TrimPrefix(key, "Bearer ")
        req.Header.Set("api-key", key)
    } else {
        req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
    }
    req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
    req.Header.Set("Accept", c.Request.Header.Get("Accept"))
    req.Header.Set("Connection", c.Request.Header.Get("Connection"))
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return errorWrapper(err, "do_request_failed", http.StatusOK)
    }
    err = req.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_request_body_failed", http.StatusOK)
    }
    err = c.Request.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_request_body_failed", http.StatusOK)
    }
    var textResponse TextResponse
    isStream := strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
    var streamResponseText string

    defer func() {
        if consumeQuota {
            quota := 0
            completionRatio := 1.34 // default for gpt-3
            if strings.HasPrefix(textRequest.Model, "gpt-4") {
                completionRatio = 2
            }
            if isStream {
                responseTokens := countTokenText(streamResponseText, textRequest.Model)
                quota = promptTokens + int(float64(responseTokens)*completionRatio)
            } else {
                quota = textResponse.Usage.PromptTokens + int(float64(textResponse.Usage.CompletionTokens)*completionRatio)
            }
            quota = int(float64(quota) * ratio)
            if ratio != 0 && quota <= 0 {
                quota = 1
            }
            quotaDelta := quota - preConsumedQuota
            err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
            if err != nil {
                common.SysError("Error consuming token remain quota: " + err.Error())
            }
            tokenName := c.GetString("token_name")
            userId := c.GetInt("id")
            model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("通过令牌「%s」使用模型 %s 消耗 %s(模型倍率 %.2f,分组倍率 %.2f)", tokenName, textRequest.Model, common.LogQuota(quota), modelRatio, groupRatio))
            model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
            channelId := c.GetInt("channel_id")
            model.UpdateChannelUsedQuota(channelId, quota)
        }
    }()

    if isStream {
        scanner := bufio.NewScanner(resp.Body)
        scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
            if atEOF && len(data) == 0 {
                return 0, nil, nil
            }

            if i := strings.Index(string(data), "\n\n"); i >= 0 {
                return i + 2, data[0:i], nil
            }

            if atEOF {
                return len(data), data, nil
            }

            return 0, nil, nil
        })
        dataChan := make(chan string)
        stopChan := make(chan bool)
        go func() {
            for scanner.Scan() {
                data := scanner.Text()
                if len(data) < 6 { // must be something wrong!
                    common.SysError("Invalid stream response: " + data)
                    continue
                }
                dataChan <- data
                data = data[6:]
                if !strings.HasPrefix(data, "[DONE]") {
                    switch relayMode {
                    case RelayModeChatCompletions:
                        var streamResponse ChatCompletionsStreamResponse
                        err = json.Unmarshal([]byte(data), &streamResponse)
                        if err != nil {
                            common.SysError("Error unmarshalling stream response: " + err.Error())
                            return
                        }
                        for _, choice := range streamResponse.Choices {
                            streamResponseText += choice.Delta.Content
                        }
                    case RelayModeCompletions:
                        var streamResponse CompletionsStreamResponse
                        err = json.Unmarshal([]byte(data), &streamResponse)
                        if err != nil {
                            common.SysError("Error unmarshalling stream response: " + err.Error())
                            return
                        }
                        for _, choice := range streamResponse.Choices {
                            streamResponseText += choice.Text
                        }
                    }
                }
            }
            stopChan <- true
        }()
        c.Writer.Header().Set("Content-Type", "text/event-stream")
        c.Writer.Header().Set("Cache-Control", "no-cache")
        c.Writer.Header().Set("Connection", "keep-alive")
        c.Writer.Header().Set("Transfer-Encoding", "chunked")
        c.Writer.Header().Set("X-Accel-Buffering", "no")
        c.Stream(func(w io.Writer) bool {
            select {
            case data := <-dataChan:
                if strings.HasPrefix(data, "data: [DONE]") {
                    data = data[:12]
                }
                c.Render(-1, common.CustomEvent{Data: data})
                return true
            case <-stopChan:
                return false
            }
        })
        err = resp.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_response_body_failed", http.StatusOK)
        }
        return nil
    } else {
        if consumeQuota {
            responseBody, err := io.ReadAll(resp.Body)
            if err != nil {
                return errorWrapper(err, "read_response_body_failed", http.StatusOK)
            }
            err = resp.Body.Close()
            if err != nil {
                return errorWrapper(err, "close_response_body_failed", http.StatusOK)
            }
            err = json.Unmarshal(responseBody, &textResponse)
            if err != nil {
                return errorWrapper(err, "unmarshal_response_body_failed", http.StatusOK)
            }
            if textResponse.Error.Type != "" {
                return &OpenAIErrorWithStatusCode{
                    OpenAIError: textResponse.Error,
                    StatusCode: resp.StatusCode,
                }
            }
            // Reset response body
            resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
        }
        // We shouldn't set the header before we parse the response body, because the parse part may fail.
        // And then we will have to send an error response, but in this case, the header has already been set.
        // So the client will be confused by the response.
        // For example, Postman will report error, and we cannot check the response at all.
        for k, v := range resp.Header {
            c.Writer.Header().Set(k, v[0])
        }
        c.Writer.WriteHeader(resp.StatusCode)
        _, err = io.Copy(c.Writer, resp.Body)
        if err != nil {
            return errorWrapper(err, "copy_response_body_failed", http.StatusOK)
        }
        err = resp.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_response_body_failed", http.StatusOK)
        }
        return nil
    }
}
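The deferred block in `relayTextHelper` is where billing happens: completion tokens are weighted by `completionRatio` (1.34 by default, 2 for gpt-4 models), the sum is scaled by the combined model/group ratio, and the difference against the pre-consumed quota is settled afterwards. A small standalone illustration of that arithmetic (assumes a modelRatio and groupRatio of 1; not part of the repository):

```go
package main

import "fmt"

func main() {
	promptTokens := 100
	completionTokens := 50
	completionRatio := 1.34 // default for gpt-3 models; 2 for gpt-4
	ratio := 1.0            // modelRatio * groupRatio

	quota := promptTokens + int(float64(completionTokens)*completionRatio)
	quota = int(float64(quota) * ratio)
	fmt.Println(quota) // 167 points, about $0.000334 with QuotaPerUnit = 500000
}
```

For streaming responses the completion side is counted by re-tokenising the concatenated delta text via `countTokenText(streamResponseText, ...)` rather than reading a usage object, since the streamed chunks do not include usage information.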
@@ -77,3 +77,15 @@ func countTokenText(text string, model string) int {
    token := tokenEncoder.Encode(text, nil, nil)
    return len(token)
}

func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
    openAIError := OpenAIError{
        Message: err.Error(),
        Type: "one_api_error",
        Code: code,
    }
    return &OpenAIErrorWithStatusCode{
        OpenAIError: openAIError,
        StatusCode: statusCode,
    }
}
@@ -1,15 +1,10 @@
package controller

import (
    "bufio"
    "bytes"
    "encoding/json"
    "fmt"
    "github.com/gin-gonic/gin"
    "io"
    "net/http"
    "one-api/common"
    "one-api/model"
    "strings"
)

@@ -25,6 +20,7 @@ const (
    RelayModeCompletions
    RelayModeEmbeddings
    RelayModeModeration
    RelayModeImagesGenerations
)

// https://platform.openai.com/docs/api-reference/chat

@@ -104,8 +100,16 @@ func Relay(c *gin.Context) {
        relayMode = RelayModeEmbeddings
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
        relayMode = RelayModeModeration
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
        relayMode = RelayModeImagesGenerations
    }
    var err *OpenAIErrorWithStatusCode
    switch relayMode {
    case RelayModeImagesGenerations:
        err = relayImageHelper(c, relayMode)
    default:
        err = relayTextHelper(c, relayMode)
    }
    err := relayHelper(c, relayMode)
    if err != nil {
        if err.StatusCode == http.StatusTooManyRequests {
            err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"

@@ -124,270 +128,6 @@ func Relay(c *gin.Context) {
    }
}

func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
    openAIError := OpenAIError{
        Message: err.Error(),
        Type: "one_api_error",
        Code: code,
    }
    return &OpenAIErrorWithStatusCode{
        OpenAIError: openAIError,
        StatusCode: statusCode,
    }
}

func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    channelType := c.GetInt("channel")
    tokenId := c.GetInt("token_id")
    consumeQuota := c.GetBool("consume_quota")
    group := c.GetString("group")
    var textRequest GeneralOpenAIRequest
    if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM {
        err := common.UnmarshalBodyReusable(c, &textRequest)
        if err != nil {
            return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
        }
    }
    if relayMode == RelayModeModeration && textRequest.Model == "" {
        textRequest.Model = "text-moderation-latest"
    }
    baseURL := common.ChannelBaseURLs[channelType]
    requestURL := c.Request.URL.String()
    if channelType == common.ChannelTypeCustom {
        baseURL = c.GetString("base_url")
    } else if channelType == common.ChannelTypeOpenAI {
        if c.GetString("base_url") != "" {
            baseURL = c.GetString("base_url")
        }
    }
    fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
    if channelType == common.ChannelTypeAzure {
        // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
        query := c.Request.URL.Query()
        apiVersion := query.Get("api-version")
        if apiVersion == "" {
            apiVersion = c.GetString("api_version")
        }
        requestURL := strings.Split(requestURL, "?")[0]
        requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
        baseURL = c.GetString("base_url")
        task := strings.TrimPrefix(requestURL, "/v1/")
        model_ := textRequest.Model
        model_ = strings.Replace(model_, ".", "", -1)
        // https://github.com/songquanpeng/one-api/issues/67
        model_ = strings.TrimSuffix(model_, "-0301")
        model_ = strings.TrimSuffix(model_, "-0314")
        model_ = strings.TrimSuffix(model_, "-0613")
        fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
    } else if channelType == common.ChannelTypePaLM {
        err := relayPaLM(textRequest, c)
        return err
    }
    var promptTokens int
    switch relayMode {
    case RelayModeChatCompletions:
        promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
    case RelayModeCompletions:
        promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
    case RelayModeModeration:
        promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
    }
    preConsumedTokens := common.PreConsumedQuota
    if textRequest.MaxTokens != 0 {
        preConsumedTokens = promptTokens + textRequest.MaxTokens
    }
    modelRatio := common.GetModelRatio(textRequest.Model)
    groupRatio := common.GetGroupRatio(group)
    ratio := modelRatio * groupRatio
    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
    if consumeQuota {
        err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
        if err != nil {
            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusOK)
        }
    }
    req, err := http.NewRequest(c.Request.Method, fullRequestURL, c.Request.Body)
    if err != nil {
        return errorWrapper(err, "new_request_failed", http.StatusOK)
    }
    if channelType == common.ChannelTypeAzure {
        key := c.Request.Header.Get("Authorization")
        key = strings.TrimPrefix(key, "Bearer ")
        req.Header.Set("api-key", key)
    } else {
        req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
    }
    req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
    req.Header.Set("Accept", c.Request.Header.Get("Accept"))
    req.Header.Set("Connection", c.Request.Header.Get("Connection"))
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return errorWrapper(err, "do_request_failed", http.StatusOK)
    }
    err = req.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_request_body_failed", http.StatusOK)
    }
    err = c.Request.Body.Close()
    if err != nil {
        return errorWrapper(err, "close_request_body_failed", http.StatusOK)
    }
    var textResponse TextResponse
    isStream := strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
    var streamResponseText string

    defer func() {
        if consumeQuota {
            quota := 0
            completionRatio := 1.34 // default for gpt-3
            if strings.HasPrefix(textRequest.Model, "gpt-4") {
                completionRatio = 2
            }
            if isStream {
                responseTokens := countTokenText(streamResponseText, textRequest.Model)
                quota = promptTokens + int(float64(responseTokens)*completionRatio)
            } else {
                quota = textResponse.Usage.PromptTokens + int(float64(textResponse.Usage.CompletionTokens)*completionRatio)
            }
            quota = int(float64(quota) * ratio)
            if ratio != 0 && quota <= 0 {
                quota = 1
            }
            quotaDelta := quota - preConsumedQuota
            err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
            if err != nil {
                common.SysError("Error consuming token remain quota: " + err.Error())
            }
            tokenName := c.GetString("token_name")
            userId := c.GetInt("id")
            model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("通过令牌「%s」使用模型 %s 消耗 %d 点额度(模型倍率 %.2f,分组倍率 %.2f)", tokenName, textRequest.Model, quota, modelRatio, groupRatio))
            model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
            channelId := c.GetInt("channel_id")
            model.UpdateChannelUsedQuota(channelId, quota)
        }
    }()

    if isStream {
        scanner := bufio.NewScanner(resp.Body)
        scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
            if atEOF && len(data) == 0 {
                return 0, nil, nil
            }

            if i := strings.Index(string(data), "\n\n"); i >= 0 {
                return i + 2, data[0:i], nil
            }

            if atEOF {
                return len(data), data, nil
            }

            return 0, nil, nil
        })
        dataChan := make(chan string)
        stopChan := make(chan bool)
        go func() {
            for scanner.Scan() {
                data := scanner.Text()
                if len(data) < 6 { // must be something wrong!
                    common.SysError("Invalid stream response: " + data)
                    continue
                }
                dataChan <- data
                data = data[6:]
                if !strings.HasPrefix(data, "[DONE]") {
                    switch relayMode {
                    case RelayModeChatCompletions:
                        var streamResponse ChatCompletionsStreamResponse
                        err = json.Unmarshal([]byte(data), &streamResponse)
                        if err != nil {
                            common.SysError("Error unmarshalling stream response: " + err.Error())
                            return
                        }
                        for _, choice := range streamResponse.Choices {
                            streamResponseText += choice.Delta.Content
                        }
                    case RelayModeCompletions:
                        var streamResponse CompletionsStreamResponse
                        err = json.Unmarshal([]byte(data), &streamResponse)
                        if err != nil {
                            common.SysError("Error unmarshalling stream response: " + err.Error())
                            return
                        }
                        for _, choice := range streamResponse.Choices {
                            streamResponseText += choice.Text
                        }
                    }
                }
            }
            stopChan <- true
        }()
        c.Writer.Header().Set("Content-Type", "text/event-stream")
        c.Writer.Header().Set("Cache-Control", "no-cache")
        c.Writer.Header().Set("Connection", "keep-alive")
        c.Writer.Header().Set("Transfer-Encoding", "chunked")
        c.Writer.Header().Set("X-Accel-Buffering", "no")
        c.Stream(func(w io.Writer) bool {
            select {
            case data := <-dataChan:
                if strings.HasPrefix(data, "data: [DONE]") {
                    data = data[:12]
                }
                c.Render(-1, common.CustomEvent{Data: data})
                return true
            case <-stopChan:
                return false
            }
        })
        err = resp.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_response_body_failed", http.StatusOK)
        }
        return nil
    } else {
        if consumeQuota {
            responseBody, err := io.ReadAll(resp.Body)
            if err != nil {
                return errorWrapper(err, "read_response_body_failed", http.StatusOK)
            }
            err = resp.Body.Close()
            if err != nil {
                return errorWrapper(err, "close_response_body_failed", http.StatusOK)
            }
            err = json.Unmarshal(responseBody, &textResponse)
            if err != nil {
                return errorWrapper(err, "unmarshal_response_body_failed", http.StatusOK)
            }
            if textResponse.Error.Type != "" {
                return &OpenAIErrorWithStatusCode{
                    OpenAIError: textResponse.Error,
                    StatusCode: resp.StatusCode,
                }
            }
            // Reset response body
            resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
        }
        // We shouldn't set the header before we parse the response body, because the parse part may fail.
        // And then we will have to send an error response, but in this case, the header has already been set.
        // So the client will be confused by the response.
        // For example, Postman will report error, and we cannot check the response at all.
        for k, v := range resp.Header {
            c.Writer.Header().Set(k, v[0])
        }
        c.Writer.WriteHeader(resp.StatusCode)
        _, err = io.Copy(c.Writer, resp.Body)
        if err != nil {
            return errorWrapper(err, "copy_response_body_failed", http.StatusOK)
        }
        err = resp.Body.Close()
        if err != nil {
            return errorWrapper(err, "close_response_body_failed", http.StatusOK)
        }
        return nil
    }
}

func RelayNotImplemented(c *gin.Context) {
    err := OpenAIError{
        Message: "API not implemented",
@@ -384,7 +384,7 @@ func UpdateUser(c *gin.Context) {
        return
    }
    if originUser.Quota != updatedUser.Quota {
        model.RecordLog(originUser.Id, model.LogTypeManage, fmt.Sprintf("管理员将用户额度从 %d 点修改为 %d 点", originUser.Quota, updatedUser.Quota))
        model.RecordLog(originUser.Id, model.LogTypeManage, fmt.Sprintf("管理员将用户额度从 %s修改为 %s", common.LogQuota(originUser.Quota), common.LogQuota(updatedUser.Quota)))
    }
    c.JSON(http.StatusOK, gin.H{
        "success": true,
main.go (6)

@@ -47,12 +47,18 @@ func main() {

    // Initialize options
    model.InitOptionMap()
    if common.RedisEnabled {
        model.InitChannelCache()
    }
    if os.Getenv("SYNC_FREQUENCY") != "" {
        frequency, err := strconv.Atoi(os.Getenv("SYNC_FREQUENCY"))
        if err != nil {
            common.FatalLog(err)
        }
        go model.SyncOptions(frequency)
        if common.RedisEnabled {
            go model.SyncChannelCache(frequency)
        }
    }

    // Initialize HTTP server
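`SYNC_FREQUENCY` is read with `strconv.Atoi`, and an unparsable value aborts startup via `FatalLog`. Judging from `SyncChannelCache` in `model/cache.go` below, which sleeps `frequency * time.Second` between reloads, the value is a number of seconds — so setting, for example, `SYNC_FREQUENCY=60` refreshes the options and, when Redis is enabled, the in-memory channel cache roughly once a minute.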
@@ -17,7 +17,7 @@ type ModelRequest struct {
func Distribute() func(c *gin.Context) {
    return func(c *gin.Context) {
        userId := c.GetInt("id")
        userGroup, _ := model.GetUserGroup(userId)
        userGroup, _ := model.CacheGetUserGroup(userId)
        c.Set("group", userGroup)
        var channel *model.Channel
        channelId, ok := c.Get("channelId")

@@ -73,7 +73,7 @@ func Distribute() func(c *gin.Context) {
                modelRequest.Model = "text-moderation-stable"
            }
        }
        channel, err = model.GetRandomSatisfiedChannel(userGroup, modelRequest.Model)
        channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
        if err != nil {
            c.JSON(200, gin.H{
                "error": gin.H{
model/cache.go (101, new file)

@@ -0,0 +1,101 @@
package model

import (
    "encoding/json"
    "fmt"
    "one-api/common"
    "sync"
    "time"
)

const (
    TokenCacheSeconds = 60 * 60
    UserId2GroupCacheSeconds = 60 * 60
)

func CacheGetTokenByKey(key string) (*Token, error) {
    var token Token
    if !common.RedisEnabled {
        err := DB.Where("`key` = ?", key).First(&token).Error
        return &token, err
    }
    tokenObjectString, err := common.RedisGet(fmt.Sprintf("token:%s", key))
    if err != nil {
        err := DB.Where("`key` = ?", key).First(&token).Error
        if err != nil {
            return nil, err
        }
        jsonBytes, err := json.Marshal(token)
        if err != nil {
            return nil, err
        }
        err = common.RedisSet(fmt.Sprintf("token:%s", key), string(jsonBytes), TokenCacheSeconds*time.Second)
        if err != nil {
            common.SysError("Redis set token error: " + err.Error())
        }
        return &token, nil
    }
    err = json.Unmarshal([]byte(tokenObjectString), &token)
    return &token, err
}

func CacheGetUserGroup(id int) (group string, err error) {
    if !common.RedisEnabled {
        return GetUserGroup(id)
    }
    group, err = common.RedisGet(fmt.Sprintf("user_group:%d", id))
    if err != nil {
        group, err = GetUserGroup(id)
        if err != nil {
            return "", err
        }
        err = common.RedisSet(fmt.Sprintf("user_group:%d", id), group, UserId2GroupCacheSeconds*time.Second)
        if err != nil {
            common.SysError("Redis set user group error: " + err.Error())
        }
    }
    return group, err
}

var channelId2channel map[int]*Channel
var channelSyncLock sync.RWMutex
var group2model2channels map[string]map[string][]*Channel

func InitChannelCache() {
    channelSyncLock.Lock()
    defer channelSyncLock.Unlock()
    channelId2channel = make(map[int]*Channel)
    var channels []*Channel
    DB.Find(&channels)
    for _, channel := range channels {
        channelId2channel[channel.Id] = channel
    }
    var abilities []*Ability
    DB.Find(&abilities)
    groups := make(map[string]bool)
    for _, ability := range abilities {
        groups[ability.Group] = true
    }
    group2model2channels = make(map[string]map[string][]*Channel)
    for group := range groups {
        group2model2channels[group] = make(map[string][]*Channel)
        // TODO: implement this
    }
}

func SyncChannelCache(frequency int) {
    for {
        time.Sleep(time.Duration(frequency) * time.Second)
        common.SysLog("Syncing channels from database")
        InitChannelCache()
    }
}

func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
    if !common.RedisEnabled {
        return GetRandomSatisfiedChannel(group, model)
    }
    return GetRandomSatisfiedChannel(group, model)
    // TODO: implement this
    return nil, nil
}
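At this stage the cache layer is deliberately partial: `CacheGetTokenByKey` and `CacheGetUserGroup` fall back to the database on a miss and then store the result in Redis for an hour (`TokenCacheSeconds` / `UserId2GroupCacheSeconds`), while `CacheGetRandomSatisfiedChannel` still delegates straight to the SQL query — the `group2model2channels` map is built by `InitChannelCache` and refreshed by `SyncChannelCache`, but, per the TODO markers, it is not consulted yet.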
@@ -35,6 +35,7 @@ func InitOptionMap() {
    common.OptionMap["RegisterEnabled"] = strconv.FormatBool(common.RegisterEnabled)
    common.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(common.AutomaticDisableChannelEnabled)
    common.OptionMap["LogConsumeEnabled"] = strconv.FormatBool(common.LogConsumeEnabled)
    common.OptionMap["DisplayInCurrencyEnabled"] = strconv.FormatBool(common.DisplayInCurrencyEnabled)
    common.OptionMap["ChannelDisableThreshold"] = strconv.FormatFloat(common.ChannelDisableThreshold, 'f', -1, 64)
    common.OptionMap["SMTPServer"] = ""
    common.OptionMap["SMTPFrom"] = ""

@@ -64,6 +65,7 @@ func InitOptionMap() {
    common.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
    common.OptionMap["TopUpLink"] = common.TopUpLink
    common.OptionMap["ChatLink"] = common.ChatLink
    common.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(common.QuotaPerUnit, 'f', -1, 64)
    common.OptionMapRWMutex.Unlock()
    loadOptionsFromDatabase()
}

@@ -140,6 +142,8 @@ func updateOptionMap(key string, value string) (err error) {
            common.AutomaticDisableChannelEnabled = boolValue
        case "LogConsumeEnabled":
            common.LogConsumeEnabled = boolValue
        case "DisplayInCurrencyEnabled":
            common.DisplayInCurrencyEnabled = boolValue
        }
    }
    switch key {

@@ -196,6 +200,8 @@ func updateOptionMap(key string, value string) (err error) {
        common.ChatLink = value
    case "ChannelDisableThreshold":
        common.ChannelDisableThreshold, _ = strconv.ParseFloat(value, 64)
    case "QuotaPerUnit":
        common.QuotaPerUnit, _ = strconv.ParseFloat(value, 64)
    }
    return err
}
@@ -66,7 +66,7 @@ func Redeem(key string, userId int) (quota int, err error) {
        if err != nil {
            common.SysError("更新兑换码状态失败:" + err.Error())
        }
        RecordLog(userId, LogTypeTopup, fmt.Sprintf("通过兑换码充值 %d 点额度", redemption.Quota))
        RecordLog(userId, LogTypeTopup, fmt.Sprintf("通过兑换码充值 %s", common.LogQuota(redemption.Quota)))
    }()
    return redemption.Quota, nil
}
@@ -36,8 +36,7 @@ func ValidateUserToken(key string) (token *Token, err error) {
    if key == "" {
        return nil, errors.New("未提供 token")
    }
    token = &Token{}
    err = DB.Where("`key` = ?", key).First(token).Error
    token, err = CacheGetTokenByKey(key)
    if err == nil {
        if token.Status != common.TokenStatusEnabled {
            return nil, errors.New("该 token 状态不可用")
@@ -93,16 +93,16 @@ func (user *User) Insert(inviterId int) error {
        return result.Error
    }
    if common.QuotaForNewUser > 0 {
        RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("新用户注册赠送 %d 点额度", common.QuotaForNewUser))
        RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("新用户注册赠送 %s", common.LogQuota(common.QuotaForNewUser)))
    }
    if inviterId != 0 {
        if common.QuotaForInvitee > 0 {
            _ = IncreaseUserQuota(user.Id, common.QuotaForInvitee)
            RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("使用邀请码赠送 %d 点额度", common.QuotaForInvitee))
            RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("使用邀请码赠送 %s", common.LogQuota(common.QuotaForInvitee)))
        }
        if common.QuotaForInviter > 0 {
            _ = IncreaseUserQuota(inviterId, common.QuotaForInviter)
            RecordLog(inviterId, LogTypeSystem, fmt.Sprintf("邀请用户赠送 %d 点额度", common.QuotaForInviter))
            RecordLog(inviterId, LogTypeSystem, fmt.Sprintf("邀请用户赠送 %s", common.LogQuota(common.QuotaForInviter)))
        }
    }
    return nil

@@ -256,6 +256,11 @@ func GetUserQuota(id int) (quota int, err error) {
    return quota, err
}

func GetUserUsedQuota(id int) (quota int, err error) {
    err = DB.Model(&User{}).Where("id = ?", id).Select("used_quota").Find(&quota).Error
    return quota, err
}

func GetUserEmail(id int) (email string, err error) {
    err = DB.Model(&User{}).Where("id = ?", id).Select("email").Find(&email).Error
    return email, err
@@ -7,7 +7,7 @@
    <meta name="theme-color" content="#ffffff" />
    <meta
      name="description"
      content="Web site created using create-react-app"
      content="OpenAI 接口聚合管理,支持多种渠道包括 Azure,可用于二次分发管理 key,仅单可执行文件,已打包好 Docker 镜像,一键部署,开箱即用"
    />
    <title>One API</title>
  </head>
@@ -48,6 +48,8 @@ function App() {
      localStorage.setItem('system_name', data.system_name);
      localStorage.setItem('logo', data.logo);
      localStorage.setItem('footer_html', data.footer_html);
      localStorage.setItem('quota_per_unit', data.quota_per_unit);
      localStorage.setItem('display_in_currency', data.display_in_currency);
      if (data.chat_link) {
        localStorage.setItem('chat_link', data.chat_link);
      } else {
@@ -13,9 +13,11 @@ const OperationSetting = () => {
    GroupRatio: '',
    TopUpLink: '',
    ChatLink: '',
    QuotaPerUnit: 0,
    AutomaticDisableChannelEnabled: '',
    ChannelDisableThreshold: 0,
    LogConsumeEnabled: ''
    LogConsumeEnabled: '',
    DisplayInCurrencyEnabled: ''
  });
  const [originInputs, setOriginInputs] = useState({});
  let [loading, setLoading] = useState(false);

@@ -118,6 +120,9 @@ const OperationSetting = () => {
        if (originInputs['ChatLink'] !== inputs.ChatLink) {
          await updateOption('ChatLink', inputs.ChatLink);
        }
        if (originInputs['QuotaPerUnit'] !== inputs.QuotaPerUnit) {
          await updateOption('QuotaPerUnit', inputs.QuotaPerUnit);
        }
        break;
    }
  };

@@ -129,7 +134,7 @@ const OperationSetting = () => {
      <Header as='h3'>
        通用设置
      </Header>
      <Form.Group widths={2}>
      <Form.Group widths={3}>
        <Form.Input
          label='充值链接'
          name='TopUpLink'

@@ -148,6 +153,30 @@ const OperationSetting = () => {
          type='link'
          placeholder='例如 ChatGPT Next Web 的部署地址'
        />
        <Form.Input
          label='额度汇率'
          name='QuotaPerUnit'
          onChange={handleInputChange}
          autoComplete='new-password'
          value={inputs.QuotaPerUnit}
          type='number'
          step='0.01'
          placeholder='一单位货币能兑换的额度'
        />
      </Form.Group>
      <Form.Group inline>
        <Form.Checkbox
          checked={inputs.LogConsumeEnabled === 'true'}
          label='启用额度消费日志记录'
          name='LogConsumeEnabled'
          onChange={handleInputChange}
        />
        <Form.Checkbox
          checked={inputs.DisplayInCurrencyEnabled === 'true'}
          label='以货币形式显示额度'
          name='DisplayInCurrencyEnabled'
          onChange={handleInputChange}
        />
      </Form.Group>
      <Form.Button onClick={() => {
        submitConfig('general').then();

@@ -264,12 +293,6 @@ const OperationSetting = () => {
          placeholder='为一个 JSON 文本,键为分组名称,值为倍率'
        />
      </Form.Group>
      <Form.Checkbox
        checked={inputs.LogConsumeEnabled === 'true'}
        label='启用额度消费日志记录'
        name='LogConsumeEnabled'
        onChange={handleInputChange}
      />
      <Form.Button onClick={() => {
        submitConfig('ratio').then();
      }}>保存倍率设置</Form.Button>
@@ -4,6 +4,7 @@ import { Link } from 'react-router-dom';
import { API, copy, showError, showInfo, showSuccess, showWarning, timestamp2string } from '../helpers';

import { ITEMS_PER_PAGE } from '../constants';
import { renderQuota } from '../helpers/render';

function renderTimestamp(timestamp) {
  return (

@@ -220,7 +221,7 @@ const RedemptionsTable = () => {
              <Table.Cell>{redemption.id}</Table.Cell>
              <Table.Cell>{redemption.name ? redemption.name : '无'}</Table.Cell>
              <Table.Cell>{renderStatus(redemption.status)}</Table.Cell>
              <Table.Cell>{redemption.quota}</Table.Cell>
              <Table.Cell>{renderQuota(redemption.quota)}</Table.Cell>
              <Table.Cell>{renderTimestamp(redemption.created_time)}</Table.Cell>
              <Table.Cell>{redemption.redeemed_time ? renderTimestamp(redemption.redeemed_time) : "尚未兑换"} </Table.Cell>
              <Table.Cell>
@@ -4,6 +4,7 @@ import { Link } from 'react-router-dom';
import { API, copy, showError, showSuccess, showWarning, timestamp2string } from '../helpers';

import { ITEMS_PER_PAGE } from '../constants';
import { renderQuota } from '../helpers/render';

function renderTimestamp(timestamp) {
  return (

@@ -220,7 +221,7 @@ const TokensTable = () => {
            <Table.Row key={token.id}>
              <Table.Cell>{token.name ? token.name : '无'}</Table.Cell>
              <Table.Cell>{renderStatus(token.status)}</Table.Cell>
              <Table.Cell>{token.unlimited_quota ? '无限制' : token.remain_quota}</Table.Cell>
              <Table.Cell>{token.unlimited_quota ? '无限制' : renderQuota(token.remain_quota, 2)}</Table.Cell>
              <Table.Cell>{renderTimestamp(token.created_time)}</Table.Cell>
              <Table.Cell>{token.expired_time === -1 ? '永不过期' : renderTimestamp(token.expired_time)}</Table.Cell>
              <Table.Cell>
@@ -4,7 +4,7 @@ import { Link } from 'react-router-dom';
import { API, showError, showSuccess } from '../helpers';

import { ITEMS_PER_PAGE } from '../constants';
import { renderGroup, renderNumber, renderText } from '../helpers/render';
import { renderGroup, renderNumber, renderQuota, renderText } from '../helpers/render';

function renderRole(role) {
  switch (role) {

@@ -244,8 +244,8 @@ const UsersTable = () => {
                {user.email ? <Popup hoverable content={user.email} trigger={<span>{renderText(user.email, 24)}</span>} /> : '无'}
              </Table.Cell>
              <Table.Cell>
                <Popup content='剩余额度' trigger={<Label>{renderNumber(user.quota)}</Label>} />
                <Popup content='已用额度' trigger={<Label>{renderNumber(user.used_quota)}</Label>} />
                <Popup content='剩余额度' trigger={<Label>{renderQuota(user.quota)}</Label>} />
                <Popup content='已用额度' trigger={<Label>{renderQuota(user.used_quota)}</Label>} />
                <Popup content='请求次数' trigger={<Label>{renderNumber(user.request_count)}</Label>} />
              </Table.Cell>
              <Table.Cell>{renderRole(user.role)}</Table.Cell>
@@ -35,4 +35,15 @@ export function renderNumber(num) {
  } else {
    return num;
  }
}

export function renderQuota(quota, digits = 2) {
  let quotaPerUnit = localStorage.getItem('quota_per_unit');
  let displayInCurrency = localStorage.getItem('display_in_currency');
  quotaPerUnit = parseFloat(quotaPerUnit);
  displayInCurrency = displayInCurrency === 'true';
  if (displayInCurrency) {
    return '$' + (quota / quotaPerUnit).toFixed(digits);
  }
  return renderNumber(quota);
}
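`renderQuota` is the frontend counterpart of `common.LogQuota`: with the server-provided `quota_per_unit` (500,000 by default) and `display_in_currency` enabled, a value of 1,250,000 renders as `$2.50`; with the flag off it falls back to `renderNumber(quota)`.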
@@ -2,6 +2,7 @@ import React, { useEffect, useState } from 'react';
import { Button, Form, Header, Segment } from 'semantic-ui-react';
import { useParams } from 'react-router-dom';
import { API, downloadTextAsFile, showError, showSuccess } from '../../helpers';
import { renderQuota } from '../../helpers/render';

const EditRedemption = () => {
  const params = useParams();

@@ -87,7 +88,7 @@ const EditRedemption = () => {
        </Form.Field>
        <Form.Field>
          <Form.Input
            label='额度'
            label={`额度(等价金额 ${renderQuota(quota)})`}
            name='quota'
            placeholder={'请输入单个兑换码中包含的额度'}
            onChange={handleInputChange}
@@ -2,6 +2,7 @@ import React, { useEffect, useState } from 'react';
import { Button, Form, Header, Message, Segment } from 'semantic-ui-react';
import { useParams } from 'react-router-dom';
import { API, showError, showSuccess, timestamp2string } from '../../helpers';
import { renderQuota } from '../../helpers/render';

const EditToken = () => {
  const params = useParams();

@@ -137,7 +138,7 @@ const EditToken = () => {
        <Message>注意,令牌的额度仅用于限制令牌本身的最大额度使用量,实际的使用受到账户的剩余额度限制。</Message>
        <Form.Field>
          <Form.Input
            label='额度'
            label={`额度(等价金额 ${renderQuota(remain_quota)})`}
            name='remain_quota'
            placeholder={'请输入额度'}
            onChange={handleInputChange}
@@ -1,6 +1,7 @@
import React, { useEffect, useState } from 'react';
import { Button, Form, Grid, Header, Segment, Statistic } from 'semantic-ui-react';
import { API, showError, showInfo, showSuccess } from '../../helpers';
import { renderQuota } from '../../helpers/render';

const TopUp = () => {
  const [redemptionCode, setRedemptionCode] = useState('');

@@ -81,7 +82,7 @@ const TopUp = () => {
        <Grid.Column>
          <Statistic.Group widths='one'>
            <Statistic>
              <Statistic.Value>{userQuota.toLocaleString()}</Statistic.Value>
              <Statistic.Value>{renderQuota(userQuota)}</Statistic.Value>
              <Statistic.Label>剩余额度</Statistic.Label>
            </Statistic>
          </Statistic.Group>