Mirror of https://github.com/songquanpeng/one-api.git
Synced 2025-10-23 01:43:42 +08:00

Compare commits: v0.4.9-alp ... v0.4.10-al (16 commits)
Commits (SHA1):
4139a7036f, 02da0b51f8, 35cfebee12, 0e088f7c3e, f61d326721, 74b06b643a, ccf7709e23, d592e2c8b8, b520b54625, 81c5901123, abc53cb208, 2b17bb8dd7, ea73201b6f, 6215d2e71c, d17bdc40a7, 280df27705
README.md (17 changed lines)
@@ -51,7 +51,7 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
<a href="https://iamazing.cn/page/reward">赞赏支持</a>
</p>

> **Note**:本项目为开源项目,请在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。
> **Note**:本项目为开源项目,使用者必须在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。

> **Note**:使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。

@@ -81,16 +81,19 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
12. 支持以美元为单位显示额度。
13. 支持发布公告,设置充值链接,设置新用户初始额度。
14. 支持模型映射,重定向用户的请求模型。
15. 支持丰富的**自定义**设置,
15. 支持失败自动重试。
16. 支持绘图接口。
17. 支持丰富的**自定义**设置,
1. 支持自定义系统名称,logo 以及页脚。
2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
16. 支持通过系统访问令牌访问管理 API。
17. 支持 Cloudflare Turnstile 用户校验。
18. 支持用户管理,支持**多种用户登录注册方式**:
18. 支持通过系统访问令牌访问管理 API。
19. 支持 Cloudflare Turnstile 用户校验。
20. 支持用户管理,支持**多种用户登录注册方式**:
+ 邮箱登录注册以及通过邮箱进行密码重置。
+ [GitHub 开放授权](https://github.com/settings/applications/new)。
+ 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
19. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
21. 支持 [ChatGLM](https://github.com/THUDM/ChatGLM2-6B)。
22. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。

## 部署
### 基于 Docker 进行部署

@@ -189,7 +192,7 @@ sudo service nginx restart
docker run --name chat-next-web -d -p 3001:3000 yidadaa/chatgpt-next-web
```

注意修改端口号和 `BASE_URL`。
注意修改端口号,之后在页面上设置接口地址(例如:https://openai.justsong.cn/ )和 API Key 即可。

#### ChatGPT Web
项目主页:https://github.com/Chanzhaoyu/chatgpt-web
@@ -68,6 +68,7 @@ var AutomaticDisableChannelEnabled = false
var QuotaRemindThreshold = 1000
var PreConsumedQuota = 500
var ApproximateTokenEnabled = false
var RetryTimes = 0

var RootUserEmail = ""

@@ -35,6 +35,7 @@ var ModelRatio = map[string]float64{
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"dall-e": 8,
}

func ModelRatio2JSONString() string {
@@ -7,16 +7,19 @@ import (
)

func GetSubscription(c *gin.Context) {
var quota int
var remainQuota int
var usedQuota int
var err error
var token *model.Token
if common.DisplayTokenStatEnabled {
tokenId := c.GetInt("token_id")
token, err = model.GetTokenById(tokenId)
quota = token.RemainQuota
remainQuota = token.RemainQuota
usedQuota = token.UsedQuota
} else {
userId := c.GetInt("id")
quota, err = model.GetUserQuota(userId)
remainQuota, err = model.GetUserQuota(userId)
usedQuota, err = model.GetUserUsedQuota(userId)
}
if err != nil {
openAIError := OpenAIError{

@@ -28,6 +31,7 @@ func GetSubscription(c *gin.Context) {
})
return
}
quota := remainQuota + usedQuota
amount := float64(quota)
if common.DisplayInCurrencyEnabled {
amount /= common.QuotaPerUnit
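Taken together, the two hunks above change what the billing subscription endpoint reports: the hard limit is now remaining plus already-used quota rather than the remaining amount alone, optionally converted to currency. A minimal sketch of that arithmetic, with an illustrative QuotaPerUnit value that is assumed here rather than taken from this diff:

```go
package main

import "fmt"

// hardLimit mirrors the arithmetic from the GetSubscription change above:
// remainQuota and usedQuota come from either the token or the user record,
// depending on DisplayTokenStatEnabled.
func hardLimit(remainQuota, usedQuota int, quotaPerUnit float64, displayInCurrency bool) float64 {
	quota := remainQuota + usedQuota // total now reported by the endpoint
	amount := float64(quota)
	if displayInCurrency {
		amount /= quotaPerUnit // convert internal quota units to currency
	}
	return amount
}

func main() {
	// 400000 quota remaining, 100000 already used, 500000 units per dollar (assumed)
	fmt.Println(hardLimit(400000, 100000, 500000, true)) // 1
}
```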
@@ -2,6 +2,7 @@ package controller

import (
"fmt"

"github.com/gin-gonic/gin"
)

@@ -53,6 +54,15 @@ func init() {
})
// https://platform.openai.com/docs/models/model-endpoint-compatibility
openAIModels = []OpenAIModels{
{
Id: "dall-e",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e",
Parent: nil,
},
{
Id: "gpt-3.5-turbo",
Object: "model",

@@ -242,6 +252,24 @@ func init() {
Root: "code-davinci-edit-001",
Parent: nil,
},
{
Id: "ChatGLM",
Object: "model",
Created: 1677649963,
OwnedBy: "thudm",
Permission: permission,
Root: "ChatGLM",
Parent: nil,
},
{
Id: "ChatGLM2",
Object: "model",
Created: 1677649963,
OwnedBy: "thudm",
Permission: permission,
Root: "ChatGLM2",
Parent: nil,
},
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {
@@ -1,34 +1,181 @@
package controller

import (
"github.com/gin-gonic/gin"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"one-api/common"
"one-api/model"

"github.com/gin-gonic/gin"
)

func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
// TODO: this part is not finished
req, err := http.NewRequest(c.Request.Method, c.Request.RequestURI, c.Request.Body)
imageModel := "dall-e"

tokenId := c.GetInt("token_id")
channelType := c.GetInt("channel")
userId := c.GetInt("id")
consumeQuota := c.GetBool("consume_quota")
group := c.GetString("group")

var imageRequest ImageRequest
if consumeQuota {
err := common.UnmarshalBodyReusable(c, &imageRequest)
if err != nil {
return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
}
}

// Prompt validation
if imageRequest.Prompt == "" {
return errorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest)
}

// Not "256x256", "512x512", or "1024x1024"
if imageRequest.Size != "" && imageRequest.Size != "256x256" && imageRequest.Size != "512x512" && imageRequest.Size != "1024x1024" {
return errorWrapper(errors.New("size must be one of 256x256, 512x512, or 1024x1024"), "invalid_field_value", http.StatusBadRequest)
}

// N should between 1 and 10
if imageRequest.N != 0 && (imageRequest.N < 1 || imageRequest.N > 10) {
return errorWrapper(errors.New("n must be between 1 and 10"), "invalid_field_value", http.StatusBadRequest)
}

// map model name
modelMapping := c.GetString("model_mapping")
isModelMapped := false
if modelMapping != "" {
modelMap := make(map[string]string)
err := json.Unmarshal([]byte(modelMapping), &modelMap)
if err != nil {
return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
}
if modelMap[imageModel] != "" {
imageModel = modelMap[imageModel]
isModelMapped = true
}
}

baseURL := common.ChannelBaseURLs[channelType]
requestURL := c.Request.URL.String()

if c.GetString("base_url") != "" {
baseURL = c.GetString("base_url")
}

fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

var requestBody io.Reader
if isModelMapped {
jsonStr, err := json.Marshal(imageRequest)
if err != nil {
return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
} else {
requestBody = c.Request.Body
}

modelRatio := common.GetModelRatio(imageModel)
groupRatio := common.GetGroupRatio(group)
ratio := modelRatio * groupRatio
userQuota, err := model.CacheGetUserQuota(userId)

sizeRatio := 1.0
// Size
if imageRequest.Size == "256x256" {
sizeRatio = 1
} else if imageRequest.Size == "512x512" {
sizeRatio = 1.125
} else if imageRequest.Size == "1024x1024" {
sizeRatio = 1.25
}
quota := int(ratio*sizeRatio*1000) * imageRequest.N

if consumeQuota && userQuota-quota < 0 {
return errorWrapper(err, "insufficient_user_quota", http.StatusForbidden)
}

req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
}
req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))

req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
req.Header.Set("Accept", c.Request.Header.Get("Accept"))

client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return errorWrapper(err, "do_request_failed", http.StatusOK)
return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}

err = req.Body.Close()
if err != nil {
return errorWrapper(err, "close_request_body_failed", http.StatusOK)
return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
}
err = c.Request.Body.Close()
if err != nil {
return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
}
var textResponse ImageResponse

defer func() {
if consumeQuota {
err := model.PostConsumeTokenQuota(tokenId, quota)
if err != nil {
common.SysError("error consuming token remain quota: " + err.Error())
}
err = model.CacheUpdateUserQuota(userId)
if err != nil {
common.SysError("error update user quota cache: " + err.Error())
}
if quota != 0 {
tokenName := c.GetString("token_name")
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
model.RecordConsumeLog(userId, 0, 0, imageModel, tokenName, quota, logContent)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
channelId := c.GetInt("channel_id")
model.UpdateChannelUsedQuota(channelId, quota)
}
}
}()

if consumeQuota {
responseBody, err := io.ReadAll(resp.Body)

if err != nil {
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
}
err = resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
}
err = json.Unmarshal(responseBody, &textResponse)
if err != nil {
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
}

resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
}

for k, v := range resp.Header {
c.Writer.Header().Set(k, v[0])
}
c.Writer.WriteHeader(resp.StatusCode)

_, err = io.Copy(c.Writer, resp.Body)
if err != nil {
return errorWrapper(err, "copy_response_body_failed", http.StatusOK)
return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
}
err = resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusOK)
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
}
return nil
}
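For reference, a standalone sketch of the pricing the new relayImageHelper applies: the charge is model ratio times group ratio times a size factor, scaled by 1000 and multiplied by the number of images. The size factors are taken from the hunk above; the dall-e model ratio of 8 matches the ModelRatio entry added earlier in this compare.

```go
package main

import "fmt"

// imageQuota reproduces the quota calculation in the new relayImageHelper.
func imageQuota(modelRatio, groupRatio float64, size string, n int) int {
	sizeRatio := 1.0
	switch size {
	case "256x256":
		sizeRatio = 1
	case "512x512":
		sizeRatio = 1.125
	case "1024x1024":
		sizeRatio = 1.25
	}
	return int(modelRatio*groupRatio*sizeRatio*1000) * n
}

func main() {
	// Two 1024x1024 images with the dall-e ratio of 8 and a group ratio of 1:
	fmt.Println(imageQuota(8, 1, "1024x1024", 2)) // 20000 quota units
}
```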
@@ -6,12 +6,13 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/gin-gonic/gin"
"io"
"net/http"
"one-api/common"
"one-api/model"
"strings"

"github.com/gin-gonic/gin"
)

func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {

@@ -30,6 +31,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
if relayMode == RelayModeModerations && textRequest.Model == "" {
textRequest.Model = "text-moderation-latest"
}
if relayMode == RelayModeEmbeddings && textRequest.Model == "" {
textRequest.Model = c.Param("model")
}
// request validation
if textRequest.Model == "" {
return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)

@@ -223,8 +227,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
return 0, nil, nil
}

if i := strings.Index(string(data), "\n\n"); i >= 0 {
return i + 2, data[0:i], nil
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}

if atEOF {

@@ -238,8 +242,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < 6 { // must be something wrong!
common.SysError("invalid stream response: " + data)
if len(data) < 6 { // ignore blank line or wrong format
continue
}
dataChan <- data

@@ -282,6 +285,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
if strings.HasPrefix(data, "data: [DONE]") {
data = data[:12]
}
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
c.Render(-1, common.CustomEvent{Data: data})
return true
case <-stopChan:
@@ -2,10 +2,12 @@ package controller

import (
"fmt"
"github.com/gin-gonic/gin"
"net/http"
"one-api/common"
"strconv"
"strings"

"github.com/gin-gonic/gin"
)

type Message struct {

@@ -37,6 +39,7 @@ type GeneralOpenAIRequest struct {
N int `json:"n,omitempty"`
Input any `json:"input,omitempty"`
Instruction string `json:"instruction,omitempty"`
Size string `json:"size,omitempty"`
}

type ChatRequest struct {

@@ -53,6 +56,12 @@ type TextRequest struct {
//Stream bool `json:"stream"`
}

type ImageRequest struct {
Prompt string `json:"prompt"`
N int `json:"n"`
Size string `json:"size"`
}

type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`

@@ -76,6 +85,13 @@ type TextResponse struct {
Error OpenAIError `json:"error"`
}

type ImageResponse struct {
Created int `json:"created"`
Data []struct {
Url string `json:"url"`
}
}

type ChatCompletionsStreamResponse struct {
Choices []struct {
Delta struct {

@@ -100,6 +116,8 @@ func Relay(c *gin.Context) {
relayMode = RelayModeCompletions
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
relayMode = RelayModeEmbeddings
} else if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
relayMode = RelayModeEmbeddings
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
relayMode = RelayModeModerations
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {

@@ -115,16 +133,25 @@ func Relay(c *gin.Context) {
err = relayTextHelper(c, relayMode)
}
if err != nil {
if err.StatusCode == http.StatusTooManyRequests {
err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"
retryTimesStr := c.Query("retry")
retryTimes, _ := strconv.Atoi(retryTimesStr)
if retryTimesStr == "" {
retryTimes = common.RetryTimes
}
if retryTimes > 0 {
c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
} else {
if err.StatusCode == http.StatusTooManyRequests {
err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"
}
c.JSON(err.StatusCode, gin.H{
"error": err.OpenAIError,
})
}
c.JSON(err.StatusCode, gin.H{
"error": err.OpenAIError,
})
channelId := c.GetInt("channel_id")
common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
// https://platform.openai.com/docs/guides/error-codes/api-errors
if common.AutomaticDisableChannelEnabled && (err.Type == "insufficient_quota" || err.Code == "invalid_api_key") {
if common.AutomaticDisableChannelEnabled && (err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated") {
channelId := c.GetInt("channel_id")
channelName := c.GetString("channel_name")
disableChannel(channelId, channelName, err.Message)
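The retry support added to Relay above works by redirecting a failed request back to the same path with a decremented retry query parameter; when the parameter is absent, the count starts from the server-side RetryTimes option. A small helper, written only for illustration, that reproduces the URL arithmetic:

```go
package main

import "fmt"

// nextRetryURL mirrors the redirect logic added to Relay: each failed attempt
// is redirected to the same path with retry decremented, until the counter
// reaches zero and the error is returned to the client as JSON.
func nextRetryURL(path string, retryTimes int) (string, bool) {
	if retryTimes <= 0 {
		return "", false // no retries left, surface the error
	}
	return fmt.Sprintf("%s?retry=%d", path, retryTimes-1), true
}

func main() {
	// With RetryTimes = 2: the first failure redirects to ?retry=1,
	// the second to ?retry=0, and the third returns the error.
	fmt.Println(nextRetryURL("/v1/chat/completions", 2))
}
```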
i18n/en.json (47 changed lines)
@@ -107,6 +107,11 @@
"已禁用": "Disabled",
"未知状态": "Unknown status",
" 秒": "s",
" 分钟 ": " m ",
" 小时 ": " h ",
" 天 ": " d ",
" 个月 ": " M ",
" 年 ": " y ",
"未测试": "Not tested",
"通道 ${name} 测试成功,耗时 ${time.toFixed(2)} 秒。": "Channel ${name} test succeeded, time consumed ${time.toFixed(2)} s.",
"已成功开始测试所有已启用通道,请刷新页面查看结果。": "All enabled channels have been successfully tested, please refresh the page to view the results.",

@@ -458,5 +463,45 @@
"消耗额度": "Used Quota",
"可选值": "Optional Values",
"渠道不存在:%d": "Channel does not exist: %d",
"数据库一致性已被破坏,请联系管理员": "Database consistency has been broken, please contact the administrator"
"数据库一致性已被破坏,请联系管理员": "Database consistency has been broken, please contact the administrator",
"使用近似的方式估算 token 数以减少计算量": "Estimate the number of tokens in an approximate way to reduce computational load",
"请填写ChannelName和ChannelKey!": "Please fill in the ChannelName and ChannelKey!",
"请至少选择一个Model!": "Please select at least one Model!",
"加载首页内容失败": "Failed to load the homepage content",
"加载关于内容失败": "Failed to load the About content",
"兑换码更新成功!": "Redemption code updated successfully!",
"兑换码创建成功!": "Redemption code created successfully!",
"用户账户创建成功!": "User account created successfully!",
"生成数量": "Generate quantity",
"请输入生成数量": "Please enter the quantity to generate",
"创建新用户账户": "Create new user account",
"渠道更新成功!": "Channel updated successfully!",
"渠道创建成功!": "Channel created successfully!",
"请选择分组": "Please select a group",
"更新兑换码信息": "Update redemption code information",
"创建新的兑换码": "Create a new redemption code",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit the group ratio in the system settings page to add a new group:",
"未找到所请求的页面": "The requested page was not found",
"过期时间格式错误!": "Expiration time format error!",
"请输入过期时间,格式为 yyyy-MM-dd HH:mm:ss,-1 表示无限制": "Please enter the expiration time, the format is yyyy-MM-dd HH:mm:ss, -1 means no limit",
"此项可选,为一个 JSON 文本,键为用户请求的模型名称,值为要替换的模型名称,例如:": "This is optional, it's a JSON text, the key is the model name requested by the user, and the value is the model name to be replaced, for example:",
"此项可选,输入镜像站地址,格式为:": "This is optional, enter the mirror site address, the format is:",
"模型映射": "Model mapping",
"请输入默认 API 版本,例如:2023-03-15-preview,该配置可以被实际的请求查询参数所覆盖": "Please enter the default API version, for example: 2023-03-15-preview, this configuration can be overridden by the actual request query parameters",
"默认": "Default",
"图片演示": "Image demo",
"参数替换为你的部署名称(模型名称中的点会被剔除)": "Replace the parameter with your deployment name (dots in the model name will be removed)",
"模型映射必须是合法的 JSON 格式!": "Model mapping must be in valid JSON format!",
"取消无限额度": "Cancel unlimited quota",
"请输入新的剩余额度": "Please enter the new remaining quota",
"请输入单个兑换码中包含的额度": "Please enter the quota included in a single redemption code",
"请输入用户名": "Please enter username",
"请输入显示名称": "Please enter display name",
"请输入密码": "Please enter password",
"模型部署名称必须和模型名称保持一致": "The model deployment name must be consistent with the model name",
",因为 One API 会把请求体中的 model": ", because One API will take the model in the request body",
"请输入 AZURE_OPENAI_ENDPOINT": "Please enter AZURE_OPENAI_ENDPOINT",
"请输入自定义渠道的 Base URL": "Please enter the Base URL of the custom channel",
"Homepage URL 填": "Fill in the Homepage URL",
"Authorization callback URL 填": "Fill in the Authorization callback URL"
}
@@ -2,12 +2,13 @@ package middleware

import (
"fmt"
"github.com/gin-gonic/gin"
"net/http"
"one-api/common"
"one-api/model"
"strconv"
"strings"

"github.com/gin-gonic/gin"
)

type ModelRequest struct {

@@ -73,6 +74,16 @@ func Distribute() func(c *gin.Context) {
modelRequest.Model = "text-moderation-stable"
}
}
if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
if modelRequest.Model == "" {
modelRequest.Model = c.Param("model")
}
}
if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
if modelRequest.Model == "" {
modelRequest.Model = "dall-e"
}
}
channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
if err != nil {
message := "无可用渠道"
@@ -68,6 +68,7 @@ func InitOptionMap() {
common.OptionMap["TopUpLink"] = common.TopUpLink
common.OptionMap["ChatLink"] = common.ChatLink
common.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(common.QuotaPerUnit, 'f', -1, 64)
common.OptionMap["RetryTimes"] = strconv.Itoa(common.RetryTimes)
common.OptionMapRWMutex.Unlock()
loadOptionsFromDatabase()
}

@@ -196,6 +197,8 @@ func updateOptionMap(key string, value string) (err error) {
common.QuotaRemindThreshold, _ = strconv.Atoi(value)
case "PreConsumedQuota":
common.PreConsumedQuota, _ = strconv.Atoi(value)
case "RetryTimes":
common.RetryTimes, _ = strconv.Atoi(value)
case "ModelRatio":
err = common.UpdateModelRatioByJSONString(value)
case "GroupRatio":
@@ -3,6 +3,7 @@ package model

import (
"errors"
"fmt"
"gorm.io/gorm"
"one-api/common"
)

@@ -48,26 +49,27 @@ func Redeem(key string, userId int) (quota int, err error) {
return 0, errors.New("无效的 user id")
}
redemption := &Redemption{}
err = DB.Where("`key` = ?", key).First(redemption).Error
if err != nil {
return 0, errors.New("无效的兑换码")
}
if redemption.Status != common.RedemptionCodeStatusEnabled {
return 0, errors.New("该兑换码已被使用")
}
err = IncreaseUserQuota(userId, redemption.Quota)
if err != nil {
return 0, err
}
go func() {

err = DB.Transaction(func(tx *gorm.DB) error {
err := DB.Where("`key` = ?", key).First(redemption).Error
if err != nil {
return errors.New("无效的兑换码")
}
if redemption.Status != common.RedemptionCodeStatusEnabled {
return errors.New("该兑换码已被使用")
}
err = DB.Model(&User{}).Where("id = ?", userId).Update("quota", gorm.Expr("quota + ?", redemption.Quota)).Error
if err != nil {
return err
}
redemption.RedeemedTime = common.GetTimestamp()
redemption.Status = common.RedemptionCodeStatusUsed
err := redemption.SelectUpdate()
if err != nil {
common.SysError("failed to update redemption status: " + err.Error())
}
RecordLog(userId, LogTypeTopup, fmt.Sprintf("通过兑换码充值 %s", common.LogQuota(redemption.Quota)))
}()
return redemption.SelectUpdate()
})
if err != nil {
return 0, errors.New("兑换失败," + err.Error())
}
RecordLog(userId, LogTypeTopup, fmt.Sprintf("通过兑换码充值 %s", common.LogQuota(redemption.Quota)))
return redemption.Quota, nil
}
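The rewritten Redeem wraps the lookup, the quota credit, and the status flip in a single gorm transaction, so a failure partway through no longer leaves a user credited while the code stays reusable. A stripped-down sketch of the same pattern; the types, status constant, and column names here are simplified placeholders, not the project's real models:

```go
package example

import "gorm.io/gorm"

// Simplified placeholder types; the real models live in one-api/model.
type Redemption struct {
	Id     int
	Key    string
	Quota  int
	Status int
}

type User struct {
	Id    int
	Quota int
}

const statusUsed = 3 // placeholder for common.RedemptionCodeStatusUsed

// redeem credits the user and marks the code as used atomically: if any step
// fails, the whole transaction rolls back and nothing is observed half-done.
func redeem(db *gorm.DB, key string, userId int) (int, error) {
	var r Redemption
	err := db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Where("`key` = ?", key).First(&r).Error; err != nil {
			return err // unknown code: roll back
		}
		if err := tx.Model(&User{}).Where("id = ?", userId).
			Update("quota", gorm.Expr("quota + ?", r.Quota)).Error; err != nil {
			return err // credit failed: roll back
		}
		return tx.Model(&r).Update("status", statusUsed).Error
	})
	if err != nil {
		return 0, err
	}
	return r.Quota, nil
}
```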
@@ -1,10 +1,11 @@
package router

import (
"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
"one-api/controller"
"one-api/middleware"

"github.com/gin-contrib/gzip"
"github.com/gin-gonic/gin"
)

func SetApiRouter(router *gin.Engine) {
@@ -1,9 +1,10 @@
package router

import (
"github.com/gin-gonic/gin"
"one-api/controller"
"one-api/middleware"

"github.com/gin-gonic/gin"
)

func SetRelayRouter(router *gin.Engine) {

@@ -20,10 +21,11 @@ func SetRelayRouter(router *gin.Engine) {
relayV1Router.POST("/completions", controller.Relay)
relayV1Router.POST("/chat/completions", controller.Relay)
relayV1Router.POST("/edits", controller.Relay)
relayV1Router.POST("/images/generations", controller.RelayNotImplemented)
relayV1Router.POST("/images/generations", controller.Relay)
relayV1Router.POST("/images/edits", controller.RelayNotImplemented)
relayV1Router.POST("/images/variations", controller.RelayNotImplemented)
relayV1Router.POST("/embeddings", controller.Relay)
relayV1Router.POST("/engines/:model/embeddings", controller.Relay)
relayV1Router.POST("/audio/transcriptions", controller.RelayNotImplemented)
relayV1Router.POST("/audio/translations", controller.RelayNotImplemented)
relayV1Router.GET("/files", controller.RelayNotImplemented)
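With /v1/images/generations now wired to controller.Relay, an OpenAI-style image request can go through the gateway like any other relayed call. A hypothetical client call for illustration: the base URL and token are placeholders, and the body fields follow the ImageRequest struct and validation added in this compare.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder deployment address and token; replace with your own.
	body, _ := json.Marshal(map[string]any{
		"prompt": "a watercolor fox", // required by the new validation
		"n":      1,                  // must be between 1 and 10
		"size":   "512x512",          // 256x256, 512x512, or 1024x1024
	})
	req, _ := http.NewRequest(http.MethodPost,
		"http://localhost:3000/v1/images/generations", bytes.NewBuffer(body))
	req.Header.Set("Authorization", "Bearer sk-your-one-api-token")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // upstream JSON (created, data[].url) is relayed back
}
```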
@@ -12,7 +12,7 @@ import {
} from 'semantic-ui-react';
import { Link, useNavigate, useSearchParams } from 'react-router-dom';
import { UserContext } from '../context/User';
import { API, getLogo, showError, showSuccess } from '../helpers';
import { API, getLogo, showError, showSuccess, showInfo } from '../helpers';

const LoginForm = () => {
const [inputs, setInputs] = useState({

@@ -76,7 +76,7 @@ const LoginForm = () => {
async function handleSubmit(e) {
setSubmitted(true);
if (username && password) {
const res = await API.post('/api/user/login', {
const res = await API.post(`/api/user/login`, {
username,
password,
});
@@ -20,6 +20,7 @@ const OperationSetting = () => {
DisplayInCurrencyEnabled: '',
DisplayTokenStatEnabled: '',
ApproximateTokenEnabled: '',
RetryTimes: 0,
});
const [originInputs, setOriginInputs] = useState({});
let [loading, setLoading] = useState(false);

@@ -122,6 +123,9 @@ const OperationSetting = () => {
if (originInputs['QuotaPerUnit'] !== inputs.QuotaPerUnit) {
await updateOption('QuotaPerUnit', inputs.QuotaPerUnit);
}
if (originInputs['RetryTimes'] !== inputs.RetryTimes) {
await updateOption('RetryTimes', inputs.RetryTimes);
}
break;
}
};

@@ -133,7 +137,7 @@ const OperationSetting = () => {
<Header as='h3'>
通用设置
</Header>
<Form.Group widths={3}>
<Form.Group widths={4}>
<Form.Input
label='充值链接'
name='TopUpLink'

@@ -162,6 +166,17 @@ const OperationSetting = () => {
step='0.01'
placeholder='一单位货币能兑换的额度'
/>
<Form.Input
label='失败重试次数'
name='RetryTimes'
type={'number'}
step='1'
min='0'
onChange={handleInputChange}
autoComplete='new-password'
value={inputs.RetryTimes}
placeholder='失败重试次数'
/>
</Form.Group>
<Form.Group inline>
<Form.Checkbox
@@ -226,6 +226,7 @@ const UsersTable = () => {
<Popup
content={user.email ? user.email : '未绑定邮箱地址'}
key={user.username}
header={user.display_name ? user.display_name : user.username}
trigger={<span>{renderText(user.username, 10)}</span>}
hoverable
/>
@@ -1,5 +1,5 @@
import React, { useEffect, useState } from 'react';
import { Button, Form, Header, Message, Segment } from 'semantic-ui-react';
import { Button, Form, Header, Input, Message, Segment } from 'semantic-ui-react';
import { useParams } from 'react-router-dom';
import { API, showError, showInfo, showSuccess, verifyJSON } from '../../helpers';
import { CHANNEL_OPTIONS } from '../../constants';

@@ -31,6 +31,7 @@ const EditChannel = () => {
const [groupOptions, setGroupOptions] = useState([]);
const [basicModels, setBasicModels] = useState([]);
const [fullModels, setFullModels] = useState([]);
const [customModel, setCustomModel] = useState('');
const handleInputChange = (e, { name, value }) => {
setInputs((inputs) => ({ ...inputs, [name]: value }));
};

@@ -43,6 +44,19 @@ const EditChannel = () => {
data.models = [];
} else {
data.models = data.models.split(',');
setTimeout(() => {
let localModelOptions = [...modelOptions];
data.models.forEach((model) => {
if (!localModelOptions.find((option) => option.key === model)) {
localModelOptions.push({
key: model,
text: model,
value: model
});
}
});
setModelOptions(localModelOptions);
}, 1000);
}
if (data.group === '') {
data.groups = [];

@@ -263,6 +277,27 @@ const EditChannel = () => {
<Button type={'button'} onClick={() => {
handleInputChange(null, { name: 'models', value: [] });
}}>清除所有模型</Button>
<Input
action={
<Button type={'button'} onClick={()=>{
let localModels = [...inputs.models];
localModels.push(customModel);
let localModelOptions = [...modelOptions];
localModelOptions.push({
key: customModel,
text: customModel,
value: customModel,
});
setModelOptions(localModelOptions);
handleInputChange(null, { name: 'models', value: localModels });
}}>填入</Button>
}
placeholder='输入自定义模型名称'
value={customModel}
onChange={(e, { value }) => {
setCustomModel(value);
}}
/>
</div>
<Form.Field>
<Form.TextArea

@@ -309,7 +344,7 @@ const EditChannel = () => {
/>
)
}
<Button positive onClick={submit}>提交</Button>
<Button type={isEdit ? "button" : "submit"} positive onClick={submit}>提交</Button>
</Form>
</Segment>
</>