Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-29 12:53:42 +08:00)

Compare commits: v0.4.0-alp ... v0.4.0 (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e0d0674f81 | |
| | 4b6adaec0b | |
| | 9301b3fed3 | |
| | c6edb78ac9 | |
README.md — 19 lines changed
@@ -44,8 +44,7 @@ _✨ All-in-one OpenAI interface, integrating various API access methods, ready to use out of the box ✨_
   <a href="https://iamazing.cn/page/reward">Donate</a>
 </p>
 
-> **Warning**: Upgrading from `v0.2` to `v0.3` requires a manual database migration; please run the [database migration script](./bin/migration_v0.2-v0.3.sql) by hand.
 > **Warning**: The latest image pulled via Docker may be an `alpha` release; pin a specific version if you need stability.
 
 ## Features
 1. Support for multiple API access channels; PRs or issues to add more channels are welcome:
@@ -65,16 +64,18 @@ _✨ All-in-one OpenAI interface, integrating various API access methods, ready to use out of the box ✨_
 5. **Token management**: set an expiration time and a usage count for each token.
 6. **Redemption code management**: batch-generate and export redemption codes, which can be used to top up accounts.
 7. **Channel management**: batch-create channels.
-8. Publish announcements, set a recharge link, and set the initial quota for new users.
-9. Rich **customization** options:
-    1. Customize the system name, logo, and footer.
-    2. Customize the home and about pages with HTML & Markdown, or embed a standalone page via iframe.
-10. Access the management API via system access tokens.
-11. User management with **multiple login/registration methods**:
+8. **User groups** and **channel groups**.
+9. Per-channel **model list** configuration.
+10. Publish announcements, set a recharge link, and set the initial quota for new users.
+11. Rich **customization** options:
+    1. Customize the system name, logo, and footer.
+    2. Customize the home and about pages with HTML & Markdown, or embed a standalone page via iframe.
+12. Access the management API via system access tokens.
+13. User management with **multiple login/registration methods**:
     + Email login/registration and password reset via email.
     + [GitHub OAuth](https://github.com/settings/applications/new).
     + WeChat Official Account login (requires additionally deploying [WeChat Server](https://github.com/songquanpeng/wechat-server)).
-12. As other large models open up their APIs, they will be supported promptly and wrapped in the same API format.
+14. As other large models open up their APIs, they will be supported promptly and wrapped in the same API format.
 
 ## Deployment
 ### Deploying with Docker
@@ -10,7 +10,7 @@ var ModelRatio = map[string]float64{
 	"gpt-4-0314":         15,
 	"gpt-4-32k":          30,
 	"gpt-4-32k-0314":     30,
-	"gpt-3.5-turbo":      1,
+	"gpt-3.5-turbo":      1, // $0.002 / 1K tokens
 	"gpt-3.5-turbo-0301": 1,
 	"text-ada-001":       0.2,
 	"text-babbage-001":   0.25,
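The comment added here documents the baseline: a ratio of 1 corresponds to gpt-3.5-turbo's $0.002 / 1K tokens, with other models priced relative to it. As a rough illustration of how such a table can be applied (the helper below is a hypothetical sketch, not one-api's actual billing code, and the fallback ratio is an assumption):

```go
package main

import "fmt"

// ModelRatio mirrors the table above: cost relative to gpt-3.5-turbo.
var ModelRatio = map[string]float64{
	"gpt-3.5-turbo": 1, // $0.002 / 1K tokens
	"gpt-4-0314":    15,
}

// quotaCost is a hypothetical helper: it scales raw token usage by the
// model's ratio, so one quota unit corresponds to one gpt-3.5-turbo token.
func quotaCost(model string, promptTokens, completionTokens int) float64 {
	ratio, ok := ModelRatio[model]
	if !ok {
		ratio = 30 // assumption: price unknown models conservatively
	}
	return float64(promptTokens+completionTokens) * ratio
}

func main() {
	fmt.Println(quotaCost("gpt-4-0314", 100, 200)) // 4500 quota units
}
```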
@@ -56,7 +56,7 @@ func testChannel(channel *model.Channel, request *ChatRequest) error {
 	if err != nil {
 		return err
 	}
-	if response.Error.Message != "" || response.Error.Code != "" {
+	if response.Usage.CompletionTokens == 0 {
 		return errors.New(fmt.Sprintf("type %s, code %s, message %s", response.Error.Type, response.Error.Code, response.Error.Message))
 	}
 	return nil
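The channel test's success criterion changes from "no error object present" to "at least one completion token was generated", which also flags upstreams that answer 200 OK with an empty result. A toy illustration of the new check, with trimmed stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

type Usage struct {
	CompletionTokens int `json:"completion_tokens"`
}

type Error struct {
	Type    string `json:"type"`
	Code    string `json:"code"`
	Message string `json:"message"`
}

type TextResponse struct {
	Usage Usage `json:"usage"`
	Error Error `json:"error"`
}

// checkResponse mirrors the new condition: a channel only passes the test
// if the upstream actually produced completion tokens.
func checkResponse(response TextResponse) error {
	if response.Usage.CompletionTokens == 0 {
		return errors.New(fmt.Sprintf("type %s, code %s, message %s",
			response.Error.Type, response.Error.Code, response.Error.Message))
	}
	return nil
}

func main() {
	fmt.Println(checkResponse(TextResponse{}))                                  // fails: zero completion tokens
	fmt.Println(checkResponse(TextResponse{Usage: Usage{CompletionTokens: 5}})) // <nil>
}
```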
@@ -116,6 +116,51 @@ func init() {
 			Root:       "text-embedding-ada-002",
 			Parent:     nil,
 		},
+		{
+			Id:         "text-davinci-003",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "text-davinci-003",
+			Parent:     nil,
+		},
+		{
+			Id:         "text-davinci-002",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "text-davinci-002",
+			Parent:     nil,
+		},
+		{
+			Id:         "text-curie-001",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "text-curie-001",
+			Parent:     nil,
+		},
+		{
+			Id:         "text-babbage-001",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "text-babbage-001",
+			Parent:     nil,
+		},
+		{
+			Id:         "text-ada-001",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "text-ada-001",
+			Parent:     nil,
+		},
 	}
 	openAIModelsMap = make(map[string]OpenAIModels)
 	for _, model := range openAIModels {
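These entries register the classic completions-era models (text-davinci-003 down to text-ada-001) so they appear in the models listing; the hunk ends by indexing everything into openAIModelsMap. A minimal sketch of that indexing pattern, with a cut-down struct:

```go
package main

import "fmt"

// OpenAIModels is a trimmed stand-in for the struct used in the diff above.
type OpenAIModels struct {
	Id      string
	Object  string
	Created int64
	OwnedBy string
}

var openAIModels = []OpenAIModels{
	{Id: "text-davinci-003", Object: "model", Created: 1677649963, OwnedBy: "openai"},
	{Id: "text-ada-001", Object: "model", Created: 1677649963, OwnedBy: "openai"},
}

var openAIModelsMap = make(map[string]OpenAIModels)

func init() {
	// Index models by id so a retrieve endpoint can answer lookups in O(1).
	for _, model := range openAIModels {
		openAIModelsMap[model.Id] = model
	}
}

func main() {
	if m, ok := openAIModelsMap["text-davinci-003"]; ok {
		fmt.Println(m.Id, m.OwnedBy)
	}
}
```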
@@ -19,6 +19,13 @@ type Message struct {
 	Name *string `json:"name,omitempty"`
 }
 
+const (
+	RelayModeUnknown = iota
+	RelayModeChatCompletions
+	RelayModeCompletions
+	RelayModeEmbeddings
+)
+
 // https://platform.openai.com/docs/api-reference/chat
 
 type GeneralOpenAIRequest struct {
@@ -69,7 +76,7 @@ type TextResponse struct {
 	Error OpenAIError `json:"error"`
 }
 
-type StreamResponse struct {
+type ChatCompletionsStreamResponse struct {
 	Choices []struct {
 		Delta struct {
 			Content string `json:"content"`
@@ -78,8 +85,23 @@ type ChatCompletionsStreamResponse struct {
 	} `json:"choices"`
 }
 
+type CompletionsStreamResponse struct {
+	Choices []struct {
+		Text         string `json:"text"`
+		FinishReason string `json:"finish_reason"`
+	} `json:"choices"`
+}
+
 func Relay(c *gin.Context) {
-	err := relayHelper(c)
+	relayMode := RelayModeUnknown
+	if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") {
+		relayMode = RelayModeChatCompletions
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") {
+		relayMode = RelayModeCompletions
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
+		relayMode = RelayModeEmbeddings
+	}
+	err := relayHelper(c, relayMode)
 	if err != nil {
 		if err.StatusCode == http.StatusTooManyRequests {
 			err.OpenAIError.Message = "负载已满,请稍后再试,或升级账户以提升服务质量。"
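Relay now derives the mode from the request path before delegating to relayHelper. Here is the same prefix matching extracted into a self-contained function for illustration (plain Go, no gin; the function name is made up):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	RelayModeUnknown = iota
	RelayModeChatCompletions
	RelayModeCompletions
	RelayModeEmbeddings
)

// relayModeFromPath reproduces the prefix checks from the diff above.
// The "/v1/chat/completions" and "/v1/completions" prefixes do not
// overlap, so the order of the cases is a matter of readability.
func relayModeFromPath(path string) int {
	switch {
	case strings.HasPrefix(path, "/v1/chat/completions"):
		return RelayModeChatCompletions
	case strings.HasPrefix(path, "/v1/completions"):
		return RelayModeCompletions
	case strings.HasPrefix(path, "/v1/embeddings"):
		return RelayModeEmbeddings
	default:
		return RelayModeUnknown
	}
}

func main() {
	fmt.Println(relayModeFromPath("/v1/completions")) // 2 (RelayModeCompletions)
}
```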
@@ -110,7 +132,7 @@ func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
 	}
 }
 
-func relayHelper(c *gin.Context) *OpenAIErrorWithStatusCode {
+func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	channelType := c.GetInt("channel")
 	tokenId := c.GetInt("token_id")
 	consumeQuota := c.GetBool("consume_quota")
@@ -148,8 +170,13 @@ func relayHelper(c *gin.Context) *OpenAIErrorWithStatusCode {
 		err := relayPaLM(textRequest, c)
 		return err
 	}
-	promptTokens := countTokenMessages(textRequest.Messages, textRequest.Model)
+	var promptTokens int
+	switch relayMode {
+	case RelayModeChatCompletions:
+		promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
+	case RelayModeCompletions:
+		promptTokens = countTokenText(textRequest.Prompt, textRequest.Model)
+	}
 	preConsumedTokens := common.PreConsumedQuota
 	if textRequest.MaxTokens != 0 {
 		preConsumedTokens = promptTokens + textRequest.MaxTokens
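Prompt tokens are now counted per mode: a whole message list for chat, a single prompt string for completions. The project's countTokenMessages/countTokenText use a real tokenizer; the sketch below substitutes a toy whitespace "tokenizer" purely to show the shape of the two paths (every name and constant here is a hypothetical stand-in):

```go
package main

import (
	"fmt"
	"strings"
)

type Message struct {
	Role    string
	Content string
}

// toyTokens stands in for a real tokenizer such as tiktoken; it only
// approximates token counts by splitting on whitespace.
func toyTokens(s string) int {
	return len(strings.Fields(s))
}

// countTokenMessages mimics the chat path: each message contributes its
// content plus some per-message overhead.
func countTokenMessages(messages []Message) int {
	const perMessageOverhead = 4 // assumption, not the exact constant
	total := 0
	for _, m := range messages {
		total += toyTokens(m.Content) + perMessageOverhead
	}
	return total
}

// countTokenText mimics the completions path: the prompt is one string.
func countTokenText(prompt string) int {
	return toyTokens(prompt)
}

func main() {
	fmt.Println(countTokenMessages([]Message{{Role: "user", Content: "Hello there"}}))
	fmt.Println(countTokenText("Once upon a time"))
}
```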
@@ -245,14 +272,27 @@ func relayHelper(c *gin.Context) *OpenAIErrorWithStatusCode {
 			dataChan <- data
 			data = data[6:]
 			if !strings.HasPrefix(data, "[DONE]") {
-				var streamResponse StreamResponse
-				err = json.Unmarshal([]byte(data), &streamResponse)
-				if err != nil {
-					common.SysError("Error unmarshalling stream response: " + err.Error())
-					return
-				}
-				for _, choice := range streamResponse.Choices {
-					streamResponseText += choice.Delta.Content
+				switch relayMode {
+				case RelayModeChatCompletions:
+					var streamResponse ChatCompletionsStreamResponse
+					err = json.Unmarshal([]byte(data), &streamResponse)
+					if err != nil {
+						common.SysError("Error unmarshalling stream response: " + err.Error())
+						return
+					}
+					for _, choice := range streamResponse.Choices {
+						streamResponseText += choice.Delta.Content
+					}
+				case RelayModeCompletions:
+					var streamResponse CompletionsStreamResponse
+					err = json.Unmarshal([]byte(data), &streamResponse)
+					if err != nil {
+						common.SysError("Error unmarshalling stream response: " + err.Error())
+						return
+					}
+					for _, choice := range streamResponse.Choices {
+						streamResponseText += choice.Text
+					}
 				}
 			}
 		}
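Streaming relies on the SSE framing visible above: strip the `data: ` prefix (the `data = data[6:]` line), stop at `[DONE]`, and unmarshal each chunk with the mode-appropriate struct. A self-contained sketch of that loop over a canned stream (the sample payloads are invented):

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

type chatChunk struct {
	Choices []struct {
		Delta struct {
			Content string `json:"content"`
		} `json:"delta"`
	} `json:"choices"`
}

func main() {
	// Canned SSE stream standing in for the upstream response body.
	stream := "data: {\"choices\":[{\"delta\":{\"content\":\"Hel\"}}]}\n" +
		"data: {\"choices\":[{\"delta\":{\"content\":\"lo\"}}]}\n" +
		"data: [DONE]\n"

	var text strings.Builder
	scanner := bufio.NewScanner(strings.NewReader(stream))
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		payload := line[6:] // same trick as data = data[6:] in the diff
		if strings.HasPrefix(payload, "[DONE]") {
			break
		}
		var chunk chatChunk
		if err := json.Unmarshal([]byte(payload), &chunk); err != nil {
			continue // a real relay would log the error and stop
		}
		for _, c := range chunk.Choices {
			text.WriteString(c.Delta.Content)
		}
	}
	fmt.Println(text.String()) // Hello
}
```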
@@ -83,7 +83,7 @@ func (redemption *Redemption) SelectUpdate() error {
 // Update Make sure your token's fields is completed, because this will update non-zero values
 func (redemption *Redemption) Update() error {
 	var err error
-	err = DB.Model(redemption).Select("name", "status", "redeemed_time").Updates(redemption).Error
+	err = DB.Model(redemption).Select("name", "status", "quota", "redeemed_time").Updates(redemption).Error
 	return err
 }
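Adding "quota" to the Select list matters because GORM's Updates with a struct skips zero-value fields; explicitly selecting columns forces them to be written, so a redemption's quota now persists on update. A minimal sketch of the pattern, with a Redemption stand-in reconstructed from the diff (field set assumed):

```go
package model

import "gorm.io/gorm"

// Redemption is a trimmed stand-in; the real model has more fields.
type Redemption struct {
	Id           int `gorm:"primaryKey"`
	Name         string
	Status       int
	Quota        int
	RedeemedTime int64
}

// updateRedemption shows the pattern from the diff: Updates with a struct
// normally ignores zero-value fields, so the writable columns are forced
// via Select — including "quota" lets a quota of 0 be persisted too.
func updateRedemption(db *gorm.DB, redemption *Redemption) error {
	return db.Model(redemption).
		Select("name", "status", "quota", "redeemed_time").
		Updates(redemption).Error
}
```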
@@ -17,7 +17,7 @@ func SetRelayRouter(router *gin.Engine) {
 	relayV1Router := router.Group("/v1")
 	relayV1Router.Use(middleware.TokenAuth(), middleware.Distribute())
 	{
-		relayV1Router.POST("/completions", controller.RelayNotImplemented)
+		relayV1Router.POST("/completions", controller.Relay)
 		relayV1Router.POST("/chat/completions", controller.Relay)
 		relayV1Router.POST("/edits", controller.RelayNotImplemented)
 		relayV1Router.POST("/images/generations", controller.RelayNotImplemented)
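With /v1/completions now routed to controller.Relay, legacy completions requests pass through the same token-auth and distribution middleware as chat. A hedged client-side sketch against a deployment (base URL, token, and model are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: point these at your one-api deployment and token.
	baseURL := "http://localhost:3000"
	token := "sk-your-one-api-token"

	body := []byte(`{"model":"text-davinci-003","prompt":"Say hi","max_tokens":16}`)
	req, err := http.NewRequest(http.MethodPost, baseURL+"/v1/completions", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```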