Compare commits

3 Commits: v0.5.12-al ... v0.5.10-de

| Author | SHA1 | Date |
|---|---|---|
|  | 5df751f5ca |  |
|  | b519170c06 |  |
|  | 3d9ad0024a |  |
.github/workflows/docker-image-amd64.yml (14 changes, vendored)

```diff
@@ -9,6 +9,8 @@ on:
       name:
         description: 'reason'
         required: false
+permissions:
+  packages: write
 jobs:
   push_to_registries:
     name: Push Docker image to multiple registries
@@ -25,12 +27,6 @@ jobs:
           git describe --tags > VERSION
 
-      - name: Log in to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
       - name: Log in to the Container registry
         uses: docker/login-action@v2
         with:
           registry: ghcr.io
@@ -41,9 +37,7 @@ jobs:
         id: meta
         uses: docker/metadata-action@v4
         with:
-          images: |
-            justsong/one-api
-            ghcr.io/${{ github.repository }}
+          images: ghcr.io/${{ github.repository }}
 
       - name: Build and push Docker images
         uses: docker/build-push-action@v3
@@ -51,4 +45,4 @@ jobs:
           context: .
           push: true
           tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
+          labels: ${{ steps.meta.outputs.labels }}
```
.github/workflows/linux-release.yml (11 changes, vendored)

```diff
@@ -7,11 +7,6 @@ on:
     tags:
       - '*'
       - '!*-alpha*'
-  workflow_dispatch:
-    inputs:
-      name:
-        description: 'reason'
-        required: false
 jobs:
   release:
     runs-on: ubuntu-latest
@@ -23,13 +18,13 @@ jobs:
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
        env:
          CI: ""
        run: |
          cd web
-          git describe --tags > VERSION
-          REACT_APP_VERSION=$(git describe --tags) chmod u+x ./build.sh && ./build.sh
+          npm install
+          REACT_APP_VERSION=$(git describe --tags) npm run build
          cd ..
      - name: Set up Go
        uses: actions/setup-go@v3
```
.github/workflows/macos-release.yml (11 changes, vendored)

```diff
@@ -7,11 +7,6 @@ on:
     tags:
       - '*'
       - '!*-alpha*'
-  workflow_dispatch:
-    inputs:
-      name:
-        description: 'reason'
-        required: false
 jobs:
   release:
     runs-on: macos-latest
@@ -23,13 +18,13 @@ jobs:
      - uses: actions/setup-node@v3
        with:
          node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
        env:
          CI: ""
        run: |
          cd web
-          git describe --tags > VERSION
-          REACT_APP_VERSION=$(git describe --tags) chmod u+x ./build.sh && ./build.sh
+          npm install
+          REACT_APP_VERSION=$(git describe --tags) npm run build
          cd ..
      - name: Set up Go
        uses: actions/setup-go@v3
```
.github/workflows/windows-release.yml (11 changes, vendored)

```diff
@@ -7,11 +7,6 @@ on:
     tags:
       - '*'
       - '!*-alpha*'
-  workflow_dispatch:
-    inputs:
-      name:
-        description: 'reason'
-        required: false
 jobs:
   release:
     runs-on: windows-latest
@@ -26,14 +21,14 @@ jobs:
      - uses: actions/setup-node@v3
        with:
          node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
        env:
          CI: ""
        run: |
-          cd web/default
+          cd web
          npm install
          REACT_APP_VERSION=$(git describe --tags) npm run build
-          cd ../..
+          cd ..
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
```
Dockerfile (17 changes)

```diff
@@ -1,15 +1,10 @@
 FROM node:16 as builder
 
-WORKDIR /web
-COPY ./VERSION .
+WORKDIR /build
+COPY web/package.json .
+RUN npm install
 COPY ./web .
-
-WORKDIR /web/default
-RUN npm install
-RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build
-
-WORKDIR /web/berry
-RUN npm install
+COPY ./VERSION .
 RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build
 
 FROM golang AS builder2
@@ -22,7 +17,7 @@ WORKDIR /build
 ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
-COPY --from=builder /web/build ./web/build
+COPY --from=builder /build/build ./web/build
 RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
 
 FROM alpine
@@ -35,4 +30,4 @@ RUN apk update \
 COPY --from=builder2 /build/one-api /
 EXPOSE 3000
 WORKDIR /data
-ENTRYPOINT ["/one-api"]
+ENTRYPOINT ["/one-api"]
```
```diff
@@ -3,7 +3,7 @@
 </p>
 
 <p align="center">
-  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
+  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a>
 </p>
 
 <div align="center">
```
```diff
@@ -3,7 +3,7 @@
 </p>
 
 <p align="center">
-  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
+  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a>
 </p>
 
 <div align="center">
```
```diff
@@ -4,7 +4,7 @@
 
 
 <p align="center">
-  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
+  <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a>
 </p>
 
 <div align="center">
@@ -99,7 +99,6 @@ _✨ Access all large models through the standard OpenAI API format, out of the box ✨_
   + Email login and registration (with a registration email whitelist) and password reset via email.
   + [GitHub OAuth](https://github.com/settings/applications/new).
   + WeChat Official Account authorization (requires additionally deploying [WeChat Server](https://github.com/songquanpeng/wechat-server)).
-23. Theme switching: just set the environment variable `THEME`; the default is `default`. PRs for more themes are welcome; see [here](./web/README.md) for details.
 
 ## Deployment
 ### Docker-based Deployment
@@ -367,8 +366,6 @@ graph LR
     + `DATA_GYM_CACHE_DIR`: currently serves the same purpose as `TIKTOKEN_CACHE_DIR`, but with lower priority.
 15. `RELAY_TIMEOUT`: relay timeout in seconds; no timeout is set by default.
 16. `SQLITE_BUSY_TIMEOUT`: SQLite lock wait timeout in milliseconds, default `3000`.
-17. `GEMINI_SAFETY_SETTING`: safety setting for Gemini, default `BLOCK_NONE`.
-18. `THEME`: the system theme, default `default`; see [here](./web/README.md) for the available values.
 
 ### Command-Line Arguments
 1. `--port <port_number>`: the port number the server listens on, default `3000`.
@@ -414,9 +411,6 @@ https://openai.justsong.cn
 8. Does the database need changes before upgrading?
    + Usually not; the system adjusts itself automatically during initialization.
    + If changes are needed, I will note it in the changelog and provide a script.
-9. Error after manually modifying the database: `数据库一致性已被破坏,请联系管理员` (database consistency has been broken, please contact the administrator)?
-   + This means some records in the ability table reference channel ids that no longer exist, most likely because you deleted records from the channel table without also cleaning up the invalid channels in the ability table.
-   + Every channel needs a dedicated ability table record for each model it supports, indicating that the channel supports that model.
 
 ## Related Projects
 * [FastGPT](https://github.com/labring/FastGPT): a knowledge-base question answering system built on LLMs
```
```diff
@@ -98,14 +98,6 @@ var BatchUpdateInterval = GetOrDefault("BATCH_UPDATE_INTERVAL", 5)
 
 var RelayTimeout = GetOrDefault("RELAY_TIMEOUT", 0) // unit is second
 
-var GeminiSafetySetting = GetOrDefaultString("GEMINI_SAFETY_SETTING", "BLOCK_NONE")
-
-var Theme = GetOrDefaultString("THEME", "default")
-var ValidThemes = map[string]bool{
-	"default": true,
-	"berry":   true,
-}
-
 const (
 	RequestIdKey = "X-Oneapi-Request-Id"
 )
```
```diff
@@ -31,11 +31,3 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
 	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
 	return nil
 }
-
-func SetEventStreamHeaders(c *gin.Context) {
-	c.Writer.Header().Set("Content-Type", "text/event-stream")
-	c.Writer.Header().Set("Cache-Control", "no-cache")
-	c.Writer.Header().Set("Connection", "keep-alive")
-	c.Writer.Header().Set("Transfer-Encoding", "chunked")
-	c.Writer.Header().Set("X-Accel-Buffering", "no")
-}
```
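The `SetEventStreamHeaders` helper exists only on the v0.5.12-alpha side; the v0.5.10 relay handlers below call a lower-case `setEventStreamHeaders` defined elsewhere. For context, a minimal sketch of how such a helper is typically driven from a gin streaming handler — the route and channel wiring here are illustrative, not taken from the repository:

```go
package main

import (
	"fmt"
	"io"

	"github.com/gin-gonic/gin"
)

// setEventStreamHeaders mirrors the removed helper: it prepares the
// response for server-sent events and disables proxy buffering.
func setEventStreamHeaders(c *gin.Context) {
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
}

func main() {
	r := gin.Default()
	r.GET("/stream", func(c *gin.Context) {
		setEventStreamHeaders(c)
		dataChan := make(chan string)
		go func() {
			defer close(dataChan)
			for i := 0; i < 3; i++ {
				dataChan <- fmt.Sprintf("chunk %d", i)
			}
		}()
		// c.Stream flushes after each callback and stops once the
		// callback returns false.
		c.Stream(func(w io.Writer) bool {
			if data, ok := <-dataChan; ok {
				c.SSEvent("message", data)
				return true
			}
			return false
		})
	})
	r.Run(":3000")
}
```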
```diff
@@ -1,8 +1,6 @@
 package image
 
 import (
-	"bytes"
-	"encoding/base64"
 	"image"
 	_ "image/gif"
 	_ "image/jpeg"
@@ -10,30 +8,11 @@ import (
 	"net/http"
 	"regexp"
 	"strings"
-	"sync"
 
 	_ "golang.org/x/image/webp"
 )
 
-// Regex to match data URL pattern
-var dataURLPattern = regexp.MustCompile(`data:image/([^;]+);base64,(.*)`)
-
-func IsImageUrl(url string) (bool, error) {
-	resp, err := http.Head(url)
-	if err != nil {
-		return false, err
-	}
-	if !strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
-		return false, nil
-	}
-	return true, nil
-}
-
 func GetImageSizeFromUrl(url string) (width int, height int, err error) {
-	isImage, err := IsImageUrl(url)
-	if !isImage {
-		return
-	}
 	resp, err := http.Get(url)
 	if err != nil {
 		return
@@ -46,60 +25,17 @@ func GetImageSizeFromUrl(url string) (width int, height int, err error) {
 	return img.Width, img.Height, nil
 }
 
-func GetImageFromUrl(url string) (mimeType string, data string, err error) {
-	// Check if the URL is a data URL
-	matches := dataURLPattern.FindStringSubmatch(url)
-	if len(matches) == 3 {
-		// URL is a data URL
-		mimeType = "image/" + matches[1]
-		data = matches[2]
-		return
-	}
-
-	isImage, err := IsImageUrl(url)
-	if !isImage {
-		return
-	}
-	resp, err := http.Get(url)
-	if err != nil {
-		return
-	}
-	defer resp.Body.Close()
-	buffer := bytes.NewBuffer(nil)
-	_, err = buffer.ReadFrom(resp.Body)
-	if err != nil {
-		return
-	}
-	mimeType = resp.Header.Get("Content-Type")
-	data = base64.StdEncoding.EncodeToString(buffer.Bytes())
-	return
-}
-
 var (
 	reg = regexp.MustCompile(`data:image/([^;]+);base64,`)
 )
 
-var readerPool = sync.Pool{
-	New: func() interface{} {
-		return &bytes.Reader{}
-	},
-}
-
 func GetImageSizeFromBase64(encoded string) (width int, height int, err error) {
-	decoded, err := base64.StdEncoding.DecodeString(reg.ReplaceAllString(encoded, ""))
+	encoded = strings.TrimPrefix(encoded, "data:image/png;base64,")
+	base64 := strings.NewReader(reg.ReplaceAllString(encoded, ""))
+	img, _, err := image.DecodeConfig(base64)
 	if err != nil {
-		return 0, 0, err
+		return
 	}
-
-	reader := readerPool.Get().(*bytes.Reader)
-	defer readerPool.Put(reader)
-	reader.Reset(decoded)
-
-	img, _, err := image.DecodeConfig(reader)
-	if err != nil {
-		return 0, 0, err
-	}
-
 	return img.Width, img.Height, nil
 }
```
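The alpha-side `GetImageSizeFromBase64` decodes the base64 payload once, then borrows a `bytes.Reader` from a `sync.Pool` rather than allocating a fresh reader per call. The pooling pattern in isolation, as a runnable sketch (the payload here is arbitrary bytes, not a real image):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// A pool of reusable *bytes.Reader values; New runs only when the pool
// is empty, so steady-state calls avoid a fresh allocation.
var readerPool = sync.Pool{
	New: func() interface{} {
		return &bytes.Reader{}
	},
}

func readAllPooled(decoded []byte) ([]byte, error) {
	reader := readerPool.Get().(*bytes.Reader)
	defer readerPool.Put(reader)
	reader.Reset(decoded) // point the pooled reader at the new payload
	return io.ReadAll(reader)
}

func main() {
	out, err := readAllPooled([]byte("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello
}
```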
```diff
@@ -152,20 +152,3 @@ func TestGetImageSize(t *testing.T) {
 		})
 	}
 }
-
-func TestGetImageSizeFromBase64(t *testing.T) {
-	for i, c := range cases {
-		t.Run("Decode:"+strconv.Itoa(i), func(t *testing.T) {
-			resp, err := http.Get(c.url)
-			assert.NoError(t, err)
-			defer resp.Body.Close()
-			data, err := io.ReadAll(resp.Body)
-			assert.NoError(t, err)
-			encoded := base64.StdEncoding.EncodeToString(data)
-			width, height, err := img.GetImageSizeFromBase64(encoded)
-			assert.NoError(t, err)
-			assert.Equal(t, c.width, width)
-			assert.Equal(t, c.height, height)
-		})
-	}
-}
```
```diff
@@ -36,11 +36,7 @@ func init() {
 	}
 
 	if os.Getenv("SESSION_SECRET") != "" {
-		if os.Getenv("SESSION_SECRET") == "random_string" {
-			SysError("SESSION_SECRET is set to an example value, please change it to a random string.")
-		} else {
-			SessionSecret = os.Getenv("SESSION_SECRET")
-		}
+		SessionSecret = os.Getenv("SESSION_SECRET")
 	}
 	if os.Getenv("SQLITE_PATH") != "" {
 		SQLitePath = os.Getenv("SQLITE_PATH")
```
```diff
@@ -52,8 +52,6 @@ var ModelRatio = map[string]float64{
 	"gpt-3.5-turbo-16k-0613": 1.5,
 	"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
 	"gpt-3.5-turbo-1106":     0.5,  // $0.001 / 1K tokens
-	"davinci-002":            1,    // $0.002 / 1K tokens
-	"babbage-002":            0.2,  // $0.0004 / 1K tokens
 	"text-ada-001":           0.2,
 	"text-babbage-001":       0.25,
 	"text-curie-001":         1,
@@ -86,7 +84,6 @@ var ModelRatio = map[string]float64{
 	"Embedding-V1":      0.1429, // ¥0.002 / 1k tokens
 	"PaLM-2":            1,
 	"gemini-pro":        1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
-	"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"chatglm_turbo":     0.3572, // ¥0.005 / 1k tokens
 	"chatglm_pro":       0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":       0.3572, // ¥0.005 / 1k tokens
@@ -118,9 +115,6 @@ func UpdateModelRatioByJSONString(jsonStr string) error {
 }
 
 func GetModelRatio(name string) float64 {
-	if strings.HasPrefix(name, "qwen-") && strings.HasSuffix(name, "-internet") {
-		name = strings.TrimSuffix(name, "-internet")
-	}
 	ratio, ok := ModelRatio[name]
 	if !ok {
 		SysError("model ratio not found: " + name)
```
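Per the inline comments in the table above, a ratio of 1 corresponds to $0.002 per 1K tokens (e.g. `gpt-3.5-turbo-1106` at 0.5 is $0.001 / 1K). A small sketch of that arithmetic — the function names are illustrative, not from the repository:

```go
package main

import "fmt"

// ratioToUSDPer1K converts a one-api model ratio to US dollars per 1K
// tokens, following the convention in the ModelRatio comments above:
// ratio 1 == $0.002 / 1K tokens.
func ratioToUSDPer1K(ratio float64) float64 {
	return ratio * 0.002
}

func costUSD(ratio float64, tokens int) float64 {
	return ratioToUSDPer1K(ratio) * float64(tokens) / 1000.0
}

func main() {
	// "gpt-3.5-turbo-1106": 0.5  ->  $0.001 / 1K tokens
	fmt.Printf("$%.6f\n", costUSD(0.5, 1000)) // $0.001000
}
```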
```diff
@@ -196,13 +196,6 @@ func GetOrDefault(env string, defaultValue int) int {
 	return num
 }
 
-func GetOrDefaultString(env string, defaultValue string) string {
-	if env == "" || os.Getenv(env) == "" {
-		return defaultValue
-	}
-	return os.Getenv(env)
-}
-
 func MessageWithRequestId(message string, id string) string {
 	return fmt.Sprintf("%s (request id: %s)", message, id)
 }
```
```diff
@@ -4,7 +4,6 @@ import (
 	"github.com/gin-gonic/gin"
 	"one-api/common"
 	"one-api/model"
-	"one-api/relay/channel/openai"
 )
 
 func GetSubscription(c *gin.Context) {
@@ -28,12 +27,12 @@ func GetSubscription(c *gin.Context) {
 		expiredTime = 0
 	}
 	if err != nil {
-		Error := openai.Error{
+		openAIError := OpenAIError{
 			Message: err.Error(),
 			Type:    "upstream_error",
 		}
 		c.JSON(200, gin.H{
-			"error": Error,
+			"error": openAIError,
 		})
 		return
 	}
@@ -70,12 +69,12 @@ func GetUsage(c *gin.Context) {
 		quota, err = model.GetUserUsedQuota(userId)
 	}
 	if err != nil {
-		Error := openai.Error{
+		openAIError := OpenAIError{
 			Message: err.Error(),
 			Type:    "one_api_error",
 		}
 		c.JSON(200, gin.H{
-			"error": Error,
+			"error": openAIError,
 		})
 		return
 	}
```
```diff
@@ -8,7 +8,6 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/model"
-	"one-api/relay/util"
 	"strconv"
 	"time"
 
@@ -93,7 +92,7 @@ func GetResponseBody(method, url string, channel *model.Channel, headers http.He
 	for k := range headers {
 		req.Header.Add(k, headers.Get(k))
 	}
-	res, err := util.HTTPClient.Do(req)
+	res, err := httpClient.Do(req)
 	if err != nil {
 		return nil, err
 	}
```
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/relay/channel/openai"
|
||||
"one-api/relay/util"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -18,7 +16,7 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func testChannel(channel *model.Channel, request openai.ChatRequest) (err error, openaiErr *openai.Error) {
|
||||
func testChannel(channel *model.Channel, request ChatRequest) (err error, openaiErr *OpenAIError) {
|
||||
switch channel.Type {
|
||||
case common.ChannelTypePaLM:
|
||||
fallthrough
|
||||
@@ -48,13 +46,13 @@ func testChannel(channel *model.Channel, request openai.ChatRequest) (err error,
|
||||
}
|
||||
requestURL := common.ChannelBaseURLs[channel.Type]
|
||||
if channel.Type == common.ChannelTypeAzure {
|
||||
requestURL = util.GetFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
|
||||
requestURL = getFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
|
||||
} else {
|
||||
if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
|
||||
requestURL = baseURL
|
||||
}
|
||||
|
||||
requestURL = util.GetFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
|
||||
requestURL = getFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
|
||||
}
|
||||
jsonData, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
@@ -70,12 +68,12 @@ func testChannel(channel *model.Channel, request openai.ChatRequest) (err error,
|
||||
req.Header.Set("Authorization", "Bearer "+channel.Key)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
resp, err := util.HTTPClient.Do(req)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var response openai.SlimTextResponse
|
||||
var response TextResponse
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
@@ -93,12 +91,12 @@ func testChannel(channel *model.Channel, request openai.ChatRequest) (err error,
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func buildTestRequest() *openai.ChatRequest {
|
||||
testRequest := &openai.ChatRequest{
|
||||
func buildTestRequest() *ChatRequest {
|
||||
testRequest := &ChatRequest{
|
||||
Model: "", // this will be set later
|
||||
MaxTokens: 1,
|
||||
}
|
||||
testMessage := openai.Message{
|
||||
testMessage := Message{
|
||||
Role: "user",
|
||||
Content: "hi",
|
||||
}
|
||||
@@ -206,10 +204,10 @@ func testAllChannels(notify bool) error {
|
||||
err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
|
||||
disableChannel(channel.Id, channel.Name, err.Error())
|
||||
}
|
||||
if isChannelEnabled && util.ShouldDisableChannel(openaiErr, -1) {
|
||||
if isChannelEnabled && shouldDisableChannel(openaiErr, -1) {
|
||||
disableChannel(channel.Id, channel.Name, err.Error())
|
||||
}
|
||||
if !isChannelEnabled && util.ShouldEnableChannel(err, openaiErr) {
|
||||
if !isChannelEnabled && shouldEnableChannel(err, openaiErr) {
|
||||
enableChannel(channel.Id, channel.Name)
|
||||
}
|
||||
channel.UpdateResponseTime(milliseconds)
|
||||
|
||||
@@ -2,8 +2,8 @@ package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/relay/channel/openai"
|
||||
)
|
||||
|
||||
// https://platform.openai.com/docs/api-reference/models/list
|
||||
@@ -342,24 +342,6 @@ func init() {
|
||||
Root: "code-davinci-edit-001",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "davinci-002",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "openai",
|
||||
Permission: permission,
|
||||
Root: "davinci-002",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "babbage-002",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "openai",
|
||||
Permission: permission,
|
||||
Root: "babbage-002",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "claude-instant-1",
|
||||
Object: "model",
|
||||
@@ -436,7 +418,7 @@ func init() {
|
||||
Id: "PaLM-2",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "google palm",
|
||||
OwnedBy: "google",
|
||||
Permission: permission,
|
||||
Root: "PaLM-2",
|
||||
Parent: nil,
|
||||
@@ -445,20 +427,11 @@ func init() {
|
||||
Id: "gemini-pro",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "google gemini",
|
||||
OwnedBy: "google",
|
||||
Permission: permission,
|
||||
Root: "gemini-pro",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "gemini-pro-vision",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "google gemini",
|
||||
Permission: permission,
|
||||
Root: "gemini-pro-vision",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "chatglm_turbo",
|
||||
Object: "model",
|
||||
@@ -613,14 +586,14 @@ func RetrieveModel(c *gin.Context) {
|
||||
if model, ok := openAIModelsMap[modelId]; ok {
|
||||
c.JSON(200, model)
|
||||
} else {
|
||||
Error := openai.Error{
|
||||
openAIError := OpenAIError{
|
||||
Message: fmt.Sprintf("The model '%s' does not exist", modelId),
|
||||
Type: "invalid_request_error",
|
||||
Param: "model",
|
||||
Code: "model_not_found",
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"error": Error,
|
||||
"error": openAIError,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,14 +42,6 @@ func UpdateOption(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
switch option.Key {
|
||||
case "Theme":
|
||||
if !common.ValidThemes[option.Value] {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": "无效的主题",
|
||||
})
|
||||
return
|
||||
}
|
||||
case "GitHubOAuthEnabled":
|
||||
if option.Value == "true" && common.GitHubClientId == "" {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package aiproxy
|
||||
package controller
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -8,27 +8,56 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/relay/channel/openai"
|
||||
"one-api/relay/constant"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答
|
||||
|
||||
func ConvertRequest(request openai.GeneralOpenAIRequest) *LibraryRequest {
|
||||
type AIProxyLibraryRequest struct {
|
||||
Model string `json:"model"`
|
||||
Query string `json:"query"`
|
||||
LibraryId string `json:"libraryId"`
|
||||
Stream bool `json:"stream"`
|
||||
}
|
||||
|
||||
type AIProxyLibraryError struct {
|
||||
ErrCode int `json:"errCode"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type AIProxyLibraryDocument struct {
|
||||
Title string `json:"title"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
type AIProxyLibraryResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Answer string `json:"answer"`
|
||||
Documents []AIProxyLibraryDocument `json:"documents"`
|
||||
AIProxyLibraryError
|
||||
}
|
||||
|
||||
type AIProxyLibraryStreamResponse struct {
|
||||
Content string `json:"content"`
|
||||
Finish bool `json:"finish"`
|
||||
Model string `json:"model"`
|
||||
Documents []AIProxyLibraryDocument `json:"documents"`
|
||||
}
|
||||
|
||||
func requestOpenAI2AIProxyLibrary(request GeneralOpenAIRequest) *AIProxyLibraryRequest {
|
||||
query := ""
|
||||
if len(request.Messages) != 0 {
|
||||
query = request.Messages[len(request.Messages)-1].StringContent()
|
||||
}
|
||||
return &LibraryRequest{
|
||||
return &AIProxyLibraryRequest{
|
||||
Model: request.Model,
|
||||
Stream: request.Stream,
|
||||
Query: query,
|
||||
}
|
||||
}
|
||||
|
||||
func aiProxyDocuments2Markdown(documents []LibraryDocument) string {
|
||||
func aiProxyDocuments2Markdown(documents []AIProxyLibraryDocument) string {
|
||||
if len(documents) == 0 {
|
||||
return ""
|
||||
}
|
||||
@@ -39,52 +68,52 @@ func aiProxyDocuments2Markdown(documents []LibraryDocument) string {
|
||||
return content
|
||||
}
|
||||
|
||||
func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextResponse {
|
||||
func responseAIProxyLibrary2OpenAI(response *AIProxyLibraryResponse) *OpenAITextResponse {
|
||||
content := response.Answer + aiProxyDocuments2Markdown(response.Documents)
|
||||
choice := openai.TextResponseChoice{
|
||||
choice := OpenAITextResponseChoice{
|
||||
Index: 0,
|
||||
Message: openai.Message{
|
||||
Message: Message{
|
||||
Role: "assistant",
|
||||
Content: content,
|
||||
},
|
||||
FinishReason: "stop",
|
||||
}
|
||||
fullTextResponse := openai.TextResponse{
|
||||
fullTextResponse := OpenAITextResponse{
|
||||
Id: common.GetUUID(),
|
||||
Object: "chat.completion",
|
||||
Created: common.GetTimestamp(),
|
||||
Choices: []openai.TextResponseChoice{choice},
|
||||
Choices: []OpenAITextResponseChoice{choice},
|
||||
}
|
||||
return &fullTextResponse
|
||||
}
|
||||
|
||||
func documentsAIProxyLibrary(documents []LibraryDocument) *openai.ChatCompletionsStreamResponse {
|
||||
var choice openai.ChatCompletionsStreamResponseChoice
|
||||
func documentsAIProxyLibrary(documents []AIProxyLibraryDocument) *ChatCompletionsStreamResponse {
|
||||
var choice ChatCompletionsStreamResponseChoice
|
||||
choice.Delta.Content = aiProxyDocuments2Markdown(documents)
|
||||
choice.FinishReason = &constant.StopFinishReason
|
||||
return &openai.ChatCompletionsStreamResponse{
|
||||
choice.FinishReason = &stopFinishReason
|
||||
return &ChatCompletionsStreamResponse{
|
||||
Id: common.GetUUID(),
|
||||
Object: "chat.completion.chunk",
|
||||
Created: common.GetTimestamp(),
|
||||
Model: "",
|
||||
Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
|
||||
Choices: []ChatCompletionsStreamResponseChoice{choice},
|
||||
}
|
||||
}
|
||||
|
||||
func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *openai.ChatCompletionsStreamResponse {
|
||||
var choice openai.ChatCompletionsStreamResponseChoice
|
||||
func streamResponseAIProxyLibrary2OpenAI(response *AIProxyLibraryStreamResponse) *ChatCompletionsStreamResponse {
|
||||
var choice ChatCompletionsStreamResponseChoice
|
||||
choice.Delta.Content = response.Content
|
||||
return &openai.ChatCompletionsStreamResponse{
|
||||
return &ChatCompletionsStreamResponse{
|
||||
Id: common.GetUUID(),
|
||||
Object: "chat.completion.chunk",
|
||||
Created: common.GetTimestamp(),
|
||||
Model: response.Model,
|
||||
Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
|
||||
Choices: []ChatCompletionsStreamResponseChoice{choice},
|
||||
}
|
||||
}
|
||||
|
||||
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
|
||||
var usage openai.Usage
|
||||
func aiProxyLibraryStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
|
||||
var usage Usage
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if atEOF && len(data) == 0 {
|
||||
@@ -114,12 +143,12 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
common.SetEventStreamHeaders(c)
|
||||
var documents []LibraryDocument
|
||||
setEventStreamHeaders(c)
|
||||
var documents []AIProxyLibraryDocument
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-dataChan:
|
||||
var AIProxyLibraryResponse LibraryStreamResponse
|
||||
var AIProxyLibraryResponse AIProxyLibraryStreamResponse
|
||||
err := json.Unmarshal([]byte(data), &AIProxyLibraryResponse)
|
||||
if err != nil {
|
||||
common.SysError("error unmarshalling stream response: " + err.Error())
|
||||
@@ -150,28 +179,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
|
||||
})
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
return nil, &usage
|
||||
}
|
||||
|
||||
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
|
||||
var AIProxyLibraryResponse LibraryResponse
|
||||
func aiProxyLibraryHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
|
||||
var AIProxyLibraryResponse AIProxyLibraryResponse
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = json.Unmarshal(responseBody, &AIProxyLibraryResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
if AIProxyLibraryResponse.ErrCode != 0 {
|
||||
return &openai.ErrorWithStatusCode{
|
||||
Error: openai.Error{
|
||||
return &OpenAIErrorWithStatusCode{
|
||||
OpenAIError: OpenAIError{
|
||||
Message: AIProxyLibraryResponse.Message,
|
||||
Type: strconv.Itoa(AIProxyLibraryResponse.ErrCode),
|
||||
Code: AIProxyLibraryResponse.ErrCode,
|
||||
@@ -182,7 +211,7 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
|
||||
fullTextResponse := responseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse)
|
||||
jsonResponse, err := json.Marshal(fullTextResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
c.Writer.Header().Set("Content-Type", "application/json")
|
||||
c.Writer.WriteHeader(resp.StatusCode)
|
||||
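The relay stream handlers in this diff all feed the upstream body through a `bufio.Scanner` with a custom split function whose body is elided above (only its first line is visible). A typical newline-based split for an SSE-style stream, as an illustrative sketch:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	body := strings.NewReader("data: one\ndata: two\ndata: three")
	scanner := bufio.NewScanner(body)
	// Split on single newlines; emit any trailing data at EOF even
	// without a final newline, mirroring the handlers above.
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
```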
```diff
@@ -1,4 +1,4 @@
-package ali
+package controller
 
 import (
 	"bufio"
@@ -7,43 +7,105 @@ import (
 	"io"
 	"net/http"
 	"one-api/common"
-	"one-api/relay/channel/openai"
 	"strings"
 )
 
 // https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r
 
-const EnableSearchModelSuffix = "-internet"
+type AliMessage struct {
+	Content string `json:"content"`
+	Role    string `json:"role"`
+}
 
-func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
-	messages := make([]Message, 0, len(request.Messages))
+type AliInput struct {
+	//Prompt   string       `json:"prompt"`
+	Messages []AliMessage `json:"messages"`
+}
+
+type AliParameters struct {
+	TopP         float64 `json:"top_p,omitempty"`
+	TopK         int     `json:"top_k,omitempty"`
+	Seed         uint64  `json:"seed,omitempty"`
+	EnableSearch bool    `json:"enable_search,omitempty"`
+}
+
+type AliChatRequest struct {
+	Model      string        `json:"model"`
+	Input      AliInput      `json:"input"`
+	Parameters AliParameters `json:"parameters,omitempty"`
+}
+
+type AliEmbeddingRequest struct {
+	Model string `json:"model"`
+	Input struct {
+		Texts []string `json:"texts"`
+	} `json:"input"`
+	Parameters *struct {
+		TextType string `json:"text_type,omitempty"`
+	} `json:"parameters,omitempty"`
+}
+
+type AliEmbedding struct {
+	Embedding []float64 `json:"embedding"`
+	TextIndex int       `json:"text_index"`
+}
+
+type AliEmbeddingResponse struct {
+	Output struct {
+		Embeddings []AliEmbedding `json:"embeddings"`
+	} `json:"output"`
+	Usage AliUsage `json:"usage"`
+	AliError
+}
+
+type AliError struct {
+	Code      string `json:"code"`
+	Message   string `json:"message"`
+	RequestId string `json:"request_id"`
+}
+
+type AliUsage struct {
+	InputTokens  int `json:"input_tokens"`
+	OutputTokens int `json:"output_tokens"`
+	TotalTokens  int `json:"total_tokens"`
+}
+
+type AliOutput struct {
+	Text         string `json:"text"`
+	FinishReason string `json:"finish_reason"`
+}
+
+type AliChatResponse struct {
+	Output AliOutput `json:"output"`
+	Usage  AliUsage  `json:"usage"`
+	AliError
+}
+
+func requestOpenAI2Ali(request GeneralOpenAIRequest) *AliChatRequest {
+	messages := make([]AliMessage, 0, len(request.Messages))
 	for i := 0; i < len(request.Messages); i++ {
 		message := request.Messages[i]
-		messages = append(messages, Message{
+		messages = append(messages, AliMessage{
 			Content: message.StringContent(),
 			Role:    strings.ToLower(message.Role),
 		})
 	}
-	enableSearch := false
-	aliModel := request.Model
-	if strings.HasSuffix(aliModel, EnableSearchModelSuffix) {
-		enableSearch = true
-		aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
-	}
-	return &ChatRequest{
-		Model: aliModel,
-		Input: Input{
+	return &AliChatRequest{
+		Model: request.Model,
+		Input: AliInput{
 			Messages: messages,
 		},
-		Parameters: Parameters{
-			EnableSearch:      enableSearch,
-			IncrementalOutput: request.Stream,
-		},
+		//Parameters: AliParameters{ // ChatGPT's parameters are not compatible with Ali's
+		//	TopP: request.TopP,
+		//	TopK: 50,
+		//	//Seed: 0,
+		//	//EnableSearch: false,
+		//},
 	}
 }
 
-func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
-	return &EmbeddingRequest{
+func embeddingRequestOpenAI2Ali(request GeneralOpenAIRequest) *AliEmbeddingRequest {
+	return &AliEmbeddingRequest{
 		Model: "text-embedding-v1",
 		Input: struct {
 			Texts []string `json:"texts"`
@@ -53,21 +115,21 @@ func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
 	}
 }
 
-func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var aliResponse EmbeddingResponse
+func aliEmbeddingHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
+	var aliResponse AliEmbeddingResponse
 	err := json.NewDecoder(resp.Body).Decode(&aliResponse)
 	if err != nil {
-		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 
 	err = resp.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
 
 	if aliResponse.Code != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &OpenAIErrorWithStatusCode{
+			OpenAIError: OpenAIError{
 				Message: aliResponse.Message,
 				Type:    aliResponse.Code,
 				Param:   aliResponse.RequestId,
@@ -80,7 +142,7 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 	fullTextResponse := embeddingResponseAli2OpenAI(&aliResponse)
 	jsonResponse, err := json.Marshal(fullTextResponse)
 	if err != nil {
-		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	c.Writer.Header().Set("Content-Type", "application/json")
 	c.Writer.WriteHeader(resp.StatusCode)
@@ -88,16 +150,16 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 	return nil, &fullTextResponse.Usage
 }
 
-func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse {
-	openAIEmbeddingResponse := openai.EmbeddingResponse{
+func embeddingResponseAli2OpenAI(response *AliEmbeddingResponse) *OpenAIEmbeddingResponse {
+	openAIEmbeddingResponse := OpenAIEmbeddingResponse{
 		Object: "list",
-		Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Output.Embeddings)),
+		Data:   make([]OpenAIEmbeddingResponseItem, 0, len(response.Output.Embeddings)),
 		Model:  "text-embedding-v1",
-		Usage:  openai.Usage{TotalTokens: response.Usage.TotalTokens},
+		Usage:  Usage{TotalTokens: response.Usage.TotalTokens},
 	}
 
 	for _, item := range response.Output.Embeddings {
-		openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
+		openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, OpenAIEmbeddingResponseItem{
 			Object:    `embedding`,
 			Index:     item.TextIndex,
 			Embedding: item.Embedding,
@@ -106,21 +168,21 @@ func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse {
 	return &openAIEmbeddingResponse
 }
 
-func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
-	choice := openai.TextResponseChoice{
+func responseAli2OpenAI(response *AliChatResponse) *OpenAITextResponse {
+	choice := OpenAITextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: Message{
 			Role:    "assistant",
 			Content: response.Output.Text,
 		},
 		FinishReason: response.Output.FinishReason,
 	}
-	fullTextResponse := openai.TextResponse{
+	fullTextResponse := OpenAITextResponse{
 		Id:      response.RequestId,
 		Object:  "chat.completion",
 		Created: common.GetTimestamp(),
-		Choices: []openai.TextResponseChoice{choice},
-		Usage: openai.Usage{
+		Choices: []OpenAITextResponseChoice{choice},
+		Usage: Usage{
 			PromptTokens:     response.Usage.InputTokens,
 			CompletionTokens: response.Usage.OutputTokens,
 			TotalTokens:      response.Usage.InputTokens + response.Usage.OutputTokens,
@@ -129,25 +191,25 @@ func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
 	return &fullTextResponse
 }
 
-func streamResponseAli2OpenAI(aliResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
-	var choice openai.ChatCompletionsStreamResponseChoice
+func streamResponseAli2OpenAI(aliResponse *AliChatResponse) *ChatCompletionsStreamResponse {
+	var choice ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = aliResponse.Output.Text
 	if aliResponse.Output.FinishReason != "null" {
 		finishReason := aliResponse.Output.FinishReason
 		choice.FinishReason = &finishReason
 	}
-	response := openai.ChatCompletionsStreamResponse{
+	response := ChatCompletionsStreamResponse{
 		Id:      aliResponse.RequestId,
 		Object:  "chat.completion.chunk",
 		Created: common.GetTimestamp(),
-		Model:   "qwen",
-		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
+		Model:   "ernie-bot",
+		Choices: []ChatCompletionsStreamResponseChoice{choice},
 	}
 	return &response
 }
 
-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var usage openai.Usage
+func aliStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
+	var usage Usage
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -177,12 +239,12 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 		}
 		stopChan <- true
 	}()
-	common.SetEventStreamHeaders(c)
-	//lastResponseText := ""
+	setEventStreamHeaders(c)
+	lastResponseText := ""
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
-			var aliResponse ChatResponse
+			var aliResponse AliChatResponse
 			err := json.Unmarshal([]byte(data), &aliResponse)
 			if err != nil {
 				common.SysError("error unmarshalling stream response: " + err.Error())
@@ -194,8 +256,8 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 				usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens
 			}
 			response := streamResponseAli2OpenAI(&aliResponse)
-			//response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
-			//lastResponseText = aliResponse.Output.Text
+			response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
+			lastResponseText = aliResponse.Output.Text
 			jsonResponse, err := json.Marshal(response)
 			if err != nil {
 				common.SysError("error marshalling stream response: " + err.Error())
@@ -210,28 +272,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 	})
 	err := resp.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
 	return nil, &usage
 }
 
-func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var aliResponse ChatResponse
+func aliHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
+	var aliResponse AliChatResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
-		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
 	err = resp.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
 	err = json.Unmarshal(responseBody, &aliResponse)
 	if err != nil {
-		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if aliResponse.Code != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &OpenAIErrorWithStatusCode{
+			OpenAIError: OpenAIError{
 				Message: aliResponse.Message,
 				Type:    aliResponse.Code,
 				Param:   aliResponse.RequestId,
@@ -241,10 +303,9 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 		}, nil
 	}
 	fullTextResponse := responseAli2OpenAI(&aliResponse)
-	fullTextResponse.Model = "qwen"
 	jsonResponse, err := json.Marshal(fullTextResponse)
 	if err != nil {
-		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+		return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	c.Writer.Header().Set("Content-Type", "application/json")
 	c.Writer.WriteHeader(resp.StatusCode)
```
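On the v0.5.10 side, each Ali stream chunk carries the accumulated text so far, so the handler derives every delta by trimming the previously seen prefix (`lastResponseText`); the alpha side instead uses incremental output and leaves those lines commented out. The trimming technique in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Each upstream chunk repeats the full text so far; emit only the
	// new suffix, exactly as the lastResponseText logic above does.
	chunks := []string{"Hello", "Hello, wor", "Hello, world!"}
	lastResponseText := ""
	for _, full := range chunks {
		delta := strings.TrimPrefix(full, lastResponseText)
		lastResponseText = full
		fmt.Printf("%q\n", delta) // "Hello", ", wor", "ld!"
	}
}
```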
```diff
@@ -12,13 +12,10 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/model"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
-	"one-api/relay/util"
 	"strings"
 )
 
-func RelayAudioHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
+func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	audioModel := "whisper-1"
 
 	tokenId := c.GetInt("token_id")
@@ -28,18 +25,18 @@ func RelayAudioHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
 	group := c.GetString("group")
 	tokenName := c.GetString("token_name")
 
-	var ttsRequest openai.TextToSpeechRequest
-	if relayMode == constant.RelayModeAudioSpeech {
+	var ttsRequest TextToSpeechRequest
+	if relayMode == RelayModeAudioSpeech {
 		// Read JSON
 		err := common.UnmarshalBodyReusable(c, &ttsRequest)
 		// Check if JSON is valid
 		if err != nil {
-			return openai.ErrorWrapper(err, "invalid_json", http.StatusBadRequest)
+			return errorWrapper(err, "invalid_json", http.StatusBadRequest)
 		}
 		audioModel = ttsRequest.Model
 		// Check if text is too long 4096
 		if len(ttsRequest.Input) > 4096 {
-			return openai.ErrorWrapper(errors.New("input is too long (over 4096 characters)"), "text_too_long", http.StatusBadRequest)
+			return errorWrapper(errors.New("input is too long (over 4096 characters)"), "text_too_long", http.StatusBadRequest)
 		}
 	}
 
@@ -49,7 +46,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	var quota int
 	var preConsumedQuota int
 	switch relayMode {
-	case constant.RelayModeAudioSpeech:
+	case RelayModeAudioSpeech:
 		preConsumedQuota = int(float64(len(ttsRequest.Input)) * ratio)
 		quota = preConsumedQuota
 	default:
@@ -57,16 +54,16 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}
 	userQuota, err := model.CacheGetUserQuota(userId)
 	if err != nil {
-		return openai.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
 	}
 
 	// Check if user quota is enough
 	if userQuota-preConsumedQuota < 0 {
-		return openai.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
 	}
 	err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
 	if err != nil {
-		return openai.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
 	}
 	if userQuota > 100*preConsumedQuota {
 		// in this case, we do not pre-consume quota
@@ -76,7 +73,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	if preConsumedQuota > 0 {
 		err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
 		if err != nil {
-			return openai.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
+			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
 		}
 	}
 
@@ -86,7 +83,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		modelMap := make(map[string]string)
 		err := json.Unmarshal([]byte(modelMapping), &modelMap)
 		if err != nil {
-			return openai.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
+			return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
 		}
 		if modelMap[audioModel] != "" {
 			audioModel = modelMap[audioModel]
@@ -99,27 +96,27 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		baseURL = c.GetString("base_url")
 	}
 
-	fullRequestURL := util.GetFullRequestURL(baseURL, requestURL, channelType)
-	if relayMode == constant.RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+	fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
+	if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
 		// https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
-		apiVersion := util.GetAPIVersion(c)
+		apiVersion := GetAPIVersion(c)
 		fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", baseURL, audioModel, apiVersion)
 	}
 
 	requestBody := &bytes.Buffer{}
 	_, err = io.Copy(requestBody, c.Request.Body)
 	if err != nil {
-		return openai.ErrorWrapper(err, "new_request_body_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "new_request_body_failed", http.StatusInternalServerError)
 	}
 	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody.Bytes()))
 	responseFormat := c.DefaultPostForm("response_format", "json")
 
 	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
 	if err != nil {
-		return openai.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
 	}
 
-	if relayMode == constant.RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+	if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
 		// https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
 		apiKey := c.Request.Header.Get("Authorization")
 		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
@@ -131,34 +128,34 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
 	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
 
-	resp, err := util.HTTPClient.Do(req)
+	resp, err := httpClient.Do(req)
 	if err != nil {
-		return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
 	}
 
 	err = req.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
 	}
 	err = c.Request.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
 	}
 
-	if relayMode != constant.RelayModeAudioSpeech {
+	if relayMode != RelayModeAudioSpeech {
 		responseBody, err := io.ReadAll(resp.Body)
 		if err != nil {
-			return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
 		}
 		err = resp.Body.Close()
 		if err != nil {
-			return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
 		}
 
-		var openAIErr openai.SlimTextResponse
+		var openAIErr TextResponse
 		if err = json.Unmarshal(responseBody, &openAIErr); err == nil {
 			if openAIErr.Error.Message != "" {
-				return openai.ErrorWrapper(fmt.Errorf("type %s, code %v, message %s", openAIErr.Error.Type, openAIErr.Error.Code, openAIErr.Error.Message), "request_error", http.StatusInternalServerError)
+				return errorWrapper(fmt.Errorf("type %s, code %v, message %s", openAIErr.Error.Type, openAIErr.Error.Code, openAIErr.Error.Message), "request_error", http.StatusInternalServerError)
 			}
 		}
 
@@ -175,12 +172,12 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		case "vtt":
 			text, err = getTextFromVTT(responseBody)
 		default:
-			return openai.ErrorWrapper(errors.New("unexpected_response_format"), "unexpected_response_format", http.StatusInternalServerError)
+			return errorWrapper(errors.New("unexpected_response_format"), "unexpected_response_format", http.StatusInternalServerError)
 		}
 		if err != nil {
-			return openai.ErrorWrapper(err, "get_text_from_body_err", http.StatusInternalServerError)
+			return errorWrapper(err, "get_text_from_body_err", http.StatusInternalServerError)
 		}
-		quota = openai.CountTokenText(text, audioModel)
+		quota = countTokenText(text, audioModel)
 		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
 	}
 	if resp.StatusCode != http.StatusOK {
@@ -196,11 +193,11 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			}()
 		}(c.Request.Context())
 		}
-		return util.RelayErrorHandler(resp)
+		return relayErrorHandler(resp)
 	}
 	quotaDelta := quota - preConsumedQuota
 	defer func(ctx context.Context) {
-		go util.PostConsumeQuota(ctx, tokenId, quotaDelta, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
+		go postConsumeQuota(ctx, tokenId, quotaDelta, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
 	}(c.Request.Context())
 
 	for k, v := range resp.Header {
@@ -210,11 +207,11 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 
 	_, err = io.Copy(c.Writer, resp.Body)
 	if err != nil {
-		return openai.ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
 	}
 	err = resp.Body.Close()
 	if err != nil {
-		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
 	}
 	return nil
 }
@@ -224,7 +221,7 @@ func getTextFromVTT(body []byte) (string, error) {
 }
 
 func getTextFromVerboseJSON(body []byte) (string, error) {
-	var whisperResponse openai.WhisperVerboseJSONResponse
+	var whisperResponse WhisperVerboseJSONResponse
 	if err := json.Unmarshal(body, &whisperResponse); err != nil {
 		return "", fmt.Errorf("unmarshal_response_body_failed err :%w", err)
 	}
@@ -257,7 +254,7 @@ func getTextFromText(body []byte) (string, error) {
 }
 
 func getTextFromJSON(body []byte) (string, error) {
-	var whisperResponse openai.WhisperJSONResponse
+	var whisperResponse WhisperJSONResponse
 	if err := json.Unmarshal(body, &whisperResponse); err != nil {
 		return "", fmt.Errorf("unmarshal_response_body_failed err :%w", err)
 	}
```
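The audio helper charges quota up front (`preConsumedQuota`) and settles the difference once the real cost is known, so `quotaDelta` may be negative, refunding the surplus. A compact sketch of that accounting, with hypothetical function names:

```go
package main

import "fmt"

// settleQuota mirrors the pre-consume/post-consume flow above:
// preConsumed is charged before the upstream call; once the actual
// cost is known, only the delta is applied (negative delta = refund).
func settleQuota(userQuota, preConsumed, actual int) (remaining int, delta int) {
	remaining = userQuota - preConsumed // charged up front
	delta = actual - preConsumed        // settled afterwards
	remaining -= delta
	return remaining, delta
}

func main() {
	remaining, delta := settleQuota(1000, 100, 60)
	fmt.Println(remaining, delta) // 940 -40: 40 units refunded
}
```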
```diff
@@ -1,4 +1,4 @@
-package baidu
+package controller
 
 import (
 	"bufio"
@@ -9,9 +9,6 @@ import (
 	"io"
 	"net/http"
 	"one-api/common"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
-	"one-api/relay/util"
 	"strings"
 	"sync"
 	"time"
@@ -40,9 +37,53 @@ type BaiduError struct {
 	ErrorMsg string `json:"error_msg"`
 }
 
+type BaiduChatResponse struct {
+	Id               string `json:"id"`
+	Object           string `json:"object"`
+	Created          int64  `json:"created"`
+	Result           string `json:"result"`
+	IsTruncated      bool   `json:"is_truncated"`
+	NeedClearHistory bool   `json:"need_clear_history"`
+	Usage            Usage  `json:"usage"`
+	BaiduError
+}
+
+type BaiduChatStreamResponse struct {
+	BaiduChatResponse
+	SentenceId int  `json:"sentence_id"`
+	IsEnd      bool `json:"is_end"`
+}
+
+type BaiduEmbeddingRequest struct {
+	Input []string `json:"input"`
+}
+
+type BaiduEmbeddingData struct {
+	Object    string    `json:"object"`
+	Embedding []float64 `json:"embedding"`
+	Index     int       `json:"index"`
+}
+
+type BaiduEmbeddingResponse struct {
+	Id      string               `json:"id"`
+	Object  string               `json:"object"`
+	Created int64                `json:"created"`
+	Data    []BaiduEmbeddingData `json:"data"`
+	Usage   Usage                `json:"usage"`
+	BaiduError
+}
+
+type BaiduAccessToken struct {
+	AccessToken      string    `json:"access_token"`
+	Error            string    `json:"error,omitempty"`
+	ErrorDescription string    `json:"error_description,omitempty"`
+	ExpiresIn        int64     `json:"expires_in,omitempty"`
+	ExpiresAt        time.Time `json:"-"`
+}
+
+var baiduTokenStore sync.Map
+
-func ConvertRequest(request openai.GeneralOpenAIRequest) *BaiduChatRequest {
+func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest {
 	messages := make([]BaiduMessage, 0, len(request.Messages))
 	for _, message := range request.Messages {
 		if message.Role == "system" {
@@ -67,56 +108,56 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *BaiduChatRequest {
 	}
 }
 
-func responseBaidu2OpenAI(response *ChatResponse) *openai.TextResponse {
-	choice := openai.TextResponseChoice{
+func responseBaidu2OpenAI(response *BaiduChatResponse) *OpenAITextResponse {
+	choice := OpenAITextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: Message{
 			Role:    "assistant",
 			Content: response.Result,
 		},
 		FinishReason: "stop",
 	}
-	fullTextResponse := openai.TextResponse{
+	fullTextResponse := OpenAITextResponse{
 		Id:      response.Id,
 		Object:  "chat.completion",
 		Created: response.Created,
-		Choices: []openai.TextResponseChoice{choice},
+		Choices: []OpenAITextResponseChoice{choice},
 		Usage:   response.Usage,
 	}
 	return &fullTextResponse
 }
 
-func streamResponseBaidu2OpenAI(baiduResponse *ChatStreamResponse) *openai.ChatCompletionsStreamResponse {
-	var choice openai.ChatCompletionsStreamResponseChoice
+func streamResponseBaidu2OpenAI(baiduResponse *BaiduChatStreamResponse) *ChatCompletionsStreamResponse {
+	var choice ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = baiduResponse.Result
 	if baiduResponse.IsEnd {
-		choice.FinishReason = &constant.StopFinishReason
+		choice.FinishReason = &stopFinishReason
 	}
-	response := openai.ChatCompletionsStreamResponse{
+	response := ChatCompletionsStreamResponse{
 		Id:      baiduResponse.Id,
 		Object:  "chat.completion.chunk",
 		Created: baiduResponse.Created,
 		Model:   "ernie-bot",
-		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
+		Choices: []ChatCompletionsStreamResponseChoice{choice},
 	}
 	return &response
 }
 
-func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
-	return &EmbeddingRequest{
+func embeddingRequestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduEmbeddingRequest {
+	return &BaiduEmbeddingRequest{
 		Input: request.ParseInput(),
 	}
 }
 
-func embeddingResponseBaidu2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse {
-	openAIEmbeddingResponse := openai.EmbeddingResponse{
+func embeddingResponseBaidu2OpenAI(response *BaiduEmbeddingResponse) *OpenAIEmbeddingResponse {
+	openAIEmbeddingResponse := OpenAIEmbeddingResponse{
 		Object: "list",
-		Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Data)),
+		Data:   make([]OpenAIEmbeddingResponseItem, 0, len(response.Data)),
```
Data: make([]OpenAIEmbeddingResponseItem, 0, len(response.Data)),
|
||||
Model: "baidu-embedding",
|
||||
Usage: response.Usage,
|
||||
}
|
||||
for _, item := range response.Data {
|
||||
openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
|
||||
openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, OpenAIEmbeddingResponseItem{
|
||||
Object: item.Object,
|
||||
Index: item.Index,
|
||||
Embedding: item.Embedding,
|
||||
@@ -125,8 +166,8 @@ func embeddingResponseBaidu2OpenAI(response *EmbeddingResponse) *openai.Embeddin
|
||||
return &openAIEmbeddingResponse
|
||||
}
|
||||
|
||||
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
|
||||
var usage openai.Usage
|
||||
func baiduStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
|
||||
var usage Usage
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if atEOF && len(data) == 0 {
|
||||
@@ -153,11 +194,11 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
common.SetEventStreamHeaders(c)
|
||||
setEventStreamHeaders(c)
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-dataChan:
|
||||
var baiduResponse ChatStreamResponse
|
||||
var baiduResponse BaiduChatStreamResponse
|
||||
err := json.Unmarshal([]byte(data), &baiduResponse)
|
||||
if err != nil {
|
||||
common.SysError("error unmarshalling stream response: " + err.Error())
|
||||
@@ -183,28 +224,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
|
||||
})
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
return nil, &usage
|
||||
}
|
||||
|
||||
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
|
||||
var baiduResponse ChatResponse
|
||||
func baiduHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
|
||||
var baiduResponse BaiduChatResponse
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = json.Unmarshal(responseBody, &baiduResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
if baiduResponse.ErrorMsg != "" {
|
||||
return &openai.ErrorWithStatusCode{
|
||||
Error: openai.Error{
|
||||
return &OpenAIErrorWithStatusCode{
|
||||
OpenAIError: OpenAIError{
|
||||
Message: baiduResponse.ErrorMsg,
|
||||
Type: "baidu_error",
|
||||
Param: "",
|
||||
@@ -214,10 +255,9 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
|
||||
}, nil
|
||||
}
|
||||
fullTextResponse := responseBaidu2OpenAI(&baiduResponse)
|
||||
fullTextResponse.Model = "ernie-bot"
|
||||
jsonResponse, err := json.Marshal(fullTextResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
c.Writer.Header().Set("Content-Type", "application/json")
|
||||
c.Writer.WriteHeader(resp.StatusCode)
|
||||
@@ -225,23 +265,23 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
|
||||
return nil, &fullTextResponse.Usage
|
||||
}
|
||||
|
||||
func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
|
||||
var baiduResponse EmbeddingResponse
|
||||
func baiduEmbeddingHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
|
||||
var baiduResponse BaiduEmbeddingResponse
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
err = json.Unmarshal(responseBody, &baiduResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
if baiduResponse.ErrorMsg != "" {
|
||||
return &openai.ErrorWithStatusCode{
|
||||
Error: openai.Error{
|
||||
return &OpenAIErrorWithStatusCode{
|
||||
OpenAIError: OpenAIError{
|
||||
Message: baiduResponse.ErrorMsg,
|
||||
Type: "baidu_error",
|
||||
Param: "",
|
||||
@@ -253,7 +293,7 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
|
||||
fullTextResponse := embeddingResponseBaidu2OpenAI(&baiduResponse)
|
||||
jsonResponse, err := json.Marshal(fullTextResponse)
|
||||
if err != nil {
|
||||
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
}
|
||||
c.Writer.Header().Set("Content-Type", "application/json")
|
||||
c.Writer.WriteHeader(resp.StatusCode)
|
||||
@@ -261,10 +301,10 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
|
||||
return nil, &fullTextResponse.Usage
|
||||
}
|
||||
|
||||
func GetAccessToken(apiKey string) (string, error) {
|
||||
func getBaiduAccessToken(apiKey string) (string, error) {
|
||||
if val, ok := baiduTokenStore.Load(apiKey); ok {
|
||||
var accessToken AccessToken
|
||||
if accessToken, ok = val.(AccessToken); ok {
|
||||
var accessToken BaiduAccessToken
|
||||
if accessToken, ok = val.(BaiduAccessToken); ok {
|
||||
// soon this will expire
|
||||
if time.Now().Add(time.Hour).After(accessToken.ExpiresAt) {
|
||||
go func() {
|
||||
@@ -279,12 +319,12 @@ func GetAccessToken(apiKey string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
if accessToken == nil {
|
||||
return "", errors.New("GetAccessToken return a nil token")
|
||||
return "", errors.New("getBaiduAccessToken return a nil token")
|
||||
}
|
||||
return (*accessToken).AccessToken, nil
|
||||
}
|
||||
|
||||
func getBaiduAccessTokenHelper(apiKey string) (*AccessToken, error) {
|
||||
func getBaiduAccessTokenHelper(apiKey string) (*BaiduAccessToken, error) {
|
||||
parts := strings.Split(apiKey, "|")
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("invalid baidu apikey")
|
||||
@@ -296,13 +336,13 @@ func getBaiduAccessTokenHelper(apiKey string) (*AccessToken, error) {
|
||||
}
|
||||
req.Header.Add("Content-Type", "application/json")
|
||||
req.Header.Add("Accept", "application/json")
|
||||
res, err := util.ImpatientHTTPClient.Do(req)
|
||||
res, err := impatientHTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var accessToken AccessToken
|
||||
var accessToken BaiduAccessToken
|
||||
err = json.NewDecoder(res.Body).Decode(&accessToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
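The access-token logic in this file caches tokens in a `sync.Map` keyed by API key and refreshes a token in the background when it is within an hour of expiring, while still serving the cached value. A minimal standalone sketch of that caching shape; `fetchToken` is a hypothetical stand-in for the HTTP call to Baidu's OAuth endpoint:

```go
package main

import (
	"errors"
	"sync"
	"time"
)

type accessToken struct {
	Token     string
	ExpiresAt time.Time
}

var tokenStore sync.Map

// fetchToken is a hypothetical stand-in for the real OAuth request
// (the project's getBaiduAccessTokenHelper).
func fetchToken(apiKey string) (*accessToken, error) {
	return &accessToken{Token: "demo", ExpiresAt: time.Now().Add(30 * 24 * time.Hour)}, nil
}

func getToken(apiKey string) (string, error) {
	if val, ok := tokenStore.Load(apiKey); ok {
		if tok, ok := val.(accessToken); ok {
			// Refresh in the background when the token expires within an hour,
			// but keep serving the still-valid cached value.
			if time.Now().Add(time.Hour).After(tok.ExpiresAt) {
				go func() {
					if fresh, err := fetchToken(apiKey); err == nil {
						tokenStore.Store(apiKey, *fresh)
					}
				}()
			}
			return tok.Token, nil
		}
	}
	fresh, err := fetchToken(apiKey)
	if err != nil {
		return "", err
	}
	if fresh == nil {
		return "", errors.New("fetchToken returned a nil token")
	}
	tokenStore.Store(apiKey, *fresh)
	return fresh.Token, nil
}

func main() { _, _ = getToken("client-id|client-secret") }
```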
@@ -1,4 +1,4 @@
package anthropic
package controller

import (
"bufio"
@@ -8,10 +8,37 @@ import (
"io"
"net/http"
"one-api/common"
"one-api/relay/channel/openai"
"strings"
)

type ClaudeMetadata struct {
UserId string `json:"user_id"`
}

type ClaudeRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
MaxTokensToSample int `json:"max_tokens_to_sample"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
//ClaudeMetadata `json:"metadata,omitempty"`
Stream bool `json:"stream,omitempty"`
}

type ClaudeError struct {
Type string `json:"type"`
Message string `json:"message"`
}

type ClaudeResponse struct {
Completion string `json:"completion"`
StopReason string `json:"stop_reason"`
Model string `json:"model"`
Error ClaudeError `json:"error"`
}

func stopReasonClaude2OpenAI(reason string) string {
switch reason {
case "stop_sequence":
@@ -23,8 +50,8 @@ func stopReasonClaude2OpenAI(reason string) string {
}
}

func ConvertRequest(textRequest openai.GeneralOpenAIRequest) *Request {
claudeRequest := Request{
func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest {
claudeRequest := ClaudeRequest{
Model: textRequest.Model,
Prompt: "",
MaxTokensToSample: textRequest.MaxTokens,
@@ -53,40 +80,40 @@ func ConvertRequest(textRequest openai.GeneralOpenAIRequest) *Request {
return &claudeRequest
}

func streamResponseClaude2OpenAI(claudeResponse *Response) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *ChatCompletionsStreamResponse {
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = claudeResponse.Completion
finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason)
if finishReason != "null" {
choice.FinishReason = &finishReason
}
var response openai.ChatCompletionsStreamResponse
var response ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = claudeResponse.Model
response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
response.Choices = []ChatCompletionsStreamResponseChoice{choice}
return &response
}

func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
choice := openai.TextResponseChoice{
func responseClaude2OpenAI(claudeResponse *ClaudeResponse) *OpenAITextResponse {
choice := OpenAITextResponseChoice{
Index: 0,
Message: openai.Message{
Message: Message{
Role: "assistant",
Content: strings.TrimPrefix(claudeResponse.Completion, " "),
Name: nil,
},
FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
}
fullTextResponse := openai.TextResponse{
fullTextResponse := OpenAITextResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion",
Created: common.GetTimestamp(),
Choices: []openai.TextResponseChoice{choice},
Choices: []OpenAITextResponseChoice{choice},
}
return &fullTextResponse
}

func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func claudeStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
createdTime := common.GetTimestamp()
@@ -116,13 +143,13 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
var claudeResponse Response
var claudeResponse ClaudeResponse
err := json.Unmarshal([]byte(data), &claudeResponse)
if err != nil {
common.SysError("error unmarshalling stream response: " + err.Error())
@@ -146,28 +173,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
})
err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}

func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var claudeResponse Response
var claudeResponse ClaudeResponse
err = json.Unmarshal(responseBody, &claudeResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if claudeResponse.Error.Type != "" {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &OpenAIErrorWithStatusCode{
OpenAIError: OpenAIError{
Message: claudeResponse.Error.Message,
Type: claudeResponse.Error.Type,
Param: "",
@@ -177,9 +204,8 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
}, nil
}
fullTextResponse := responseClaude2OpenAI(&claudeResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(claudeResponse.Completion, model)
usage := openai.Usage{
completionTokens := countTokenText(claudeResponse.Completion, model)
usage := Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
@@ -187,7 +213,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
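For context on `stopReasonClaude2OpenAI`, which both versions share: it is a plain switch from Anthropic's stop-reason vocabulary to OpenAI's finish reasons, and the hunk above only shows its edges. A sketch of the likely full mapping; the exact case list is inferred from the visible hunk boundaries, not confirmed by the diff:

```go
package main

import "fmt"

// stopReasonClaude2OpenAI maps Anthropic stop reasons onto OpenAI finish
// reasons: "stop_sequence" becomes "stop", "max_tokens" becomes "length".
// The case list here is an inference from the hunk edges.
func stopReasonClaude2OpenAI(reason string) string {
	switch reason {
	case "stop_sequence":
		return "stop"
	case "max_tokens":
		return "length"
	default:
		return reason
	}
}

func main() {
	fmt.Println(stopReasonClaude2OpenAI("stop_sequence")) // stop
	fmt.Println(stopReasonClaude2OpenAI("max_tokens"))    // length
}
```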
@@ -1,4 +1,4 @@
package google
package controller

import (
"bufio"
@@ -7,42 +7,73 @@ import (
"io"
"net/http"
"one-api/common"
"one-api/common/image"
"one-api/relay/channel/openai"
"one-api/relay/constant"
"strings"

"github.com/gin-gonic/gin"
)

// https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
Tools []GeminiChatTools `json:"tools,omitempty"`
}

const (
GeminiVisionMaxImageNum = 16
)
type GeminiInlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}

type GeminiPart struct {
Text string `json:"text,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}

type GeminiChatContent struct {
Role string `json:"role,omitempty"`
Parts []GeminiPart `json:"parts"`
}

type GeminiChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}

type GeminiChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}

type GeminiChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}

// Setting safety to the lowest possible values since Gemini is already powerless enough
func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRequest {
func requestOpenAI2Gemini(textRequest GeneralOpenAIRequest) *GeminiChatRequest {
geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
SafetySettings: []GeminiChatSafetySettings{
{
Category: "HARM_CATEGORY_HARASSMENT",
Threshold: common.GeminiSafetySetting,
},
{
Category: "HARM_CATEGORY_HATE_SPEECH",
Threshold: common.GeminiSafetySetting,
},
{
Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
Threshold: common.GeminiSafetySetting,
},
{
Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
Threshold: common.GeminiSafetySetting,
},
},
//SafetySettings: []GeminiChatSafetySettings{
// {
// Category: "HARM_CATEGORY_HARASSMENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_HATE_SPEECH",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
//},
GenerationConfig: GeminiChatGenerationConfig{
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
@@ -66,30 +97,6 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
},
},
}
openaiContent := message.ParseContent()
var parts []GeminiPart
imageNum := 0
for _, part := range openaiContent {
if part.Type == openai.ContentTypeText {
parts = append(parts, GeminiPart{
Text: part.Text,
})
} else if part.Type == openai.ContentTypeImageURL {
imageNum += 1
if imageNum > GeminiVisionMaxImageNum {
continue
}
mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
parts = append(parts, GeminiPart{
InlineData: &GeminiInlineData{
MimeType: mimeType,
Data: data,
},
})
}
}
content.Parts = parts

// there's no assistant role in gemini and API shall vomit if Role is not user or model
if content.Role == "assistant" {
content.Role = "model"
@@ -149,21 +156,21 @@ type GeminiChatPromptFeedback struct {
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}

func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextResponse {
fullTextResponse := openai.TextResponse{
func responseGeminiChat2OpenAI(response *GeminiChatResponse) *OpenAITextResponse {
fullTextResponse := OpenAITextResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion",
Created: common.GetTimestamp(),
Choices: make([]openai.TextResponseChoice, 0, len(response.Candidates)),
Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
}
for i, candidate := range response.Candidates {
choice := openai.TextResponseChoice{
choice := OpenAITextResponseChoice{
Index: i,
Message: openai.Message{
Message: Message{
Role: "assistant",
Content: "",
},
FinishReason: constant.StopFinishReason,
FinishReason: stopFinishReason,
}
if len(candidate.Content.Parts) > 0 {
choice.Message.Content = candidate.Content.Parts[0].Text
@@ -173,18 +180,18 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
return &fullTextResponse
}

func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *ChatCompletionsStreamResponse {
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = geminiResponse.GetResponseText()
choice.FinishReason = &constant.StopFinishReason
var response openai.ChatCompletionsStreamResponse
choice.FinishReason = &stopFinishReason
var response ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = "gemini"
response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
response.Choices = []ChatCompletionsStreamResponseChoice{choice}
return &response
}

func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func geminiChatStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
dataChan := make(chan string)
stopChan := make(chan bool)
@@ -214,7 +221,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
@@ -226,14 +233,14 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
var dummy dummyStruct
err := json.Unmarshal([]byte(data), &dummy)
responseText += dummy.Content
var choice openai.ChatCompletionsStreamResponseChoice
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = dummy.Content
response := openai.ChatCompletionsStreamResponse{
response := ChatCompletionsStreamResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion.chunk",
Created: common.GetTimestamp(),
Model: "gemini-pro",
Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
Choices: []ChatCompletionsStreamResponseChoice{choice},
}
jsonResponse, err := json.Marshal(response)
if err != nil {
@@ -249,28 +256,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
})
err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}

func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var geminiResponse GeminiChatResponse
err = json.Unmarshal(responseBody, &geminiResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if len(geminiResponse.Candidates) == 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &OpenAIErrorWithStatusCode{
OpenAIError: OpenAIError{
Message: "No candidates returned",
Type: "server_error",
Param: "",
@@ -280,9 +287,8 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
}, nil
}
fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), model)
usage := openai.Usage{
completionTokens := countTokenText(geminiResponse.GetResponseText(), model)
usage := Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
@@ -290,7 +296,7 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
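Two conversion details in the Gemini hunks are worth calling out: OpenAI's `assistant` role must be rewritten to `model` (Gemini only accepts `user` and `model`), and vision requests silently drop inline images beyond `GeminiVisionMaxImageNum`. A condensed sketch of both rules, using simplified local types rather than the project's:

```go
package main

import "fmt"

const geminiVisionMaxImageNum = 16

type part struct {
	Text    string
	IsImage bool
}

type content struct {
	Role  string
	Parts []part
}

// toGeminiContent applies the two rules visible in the diff: drop images
// past the cap, and rename the assistant role to model. Types here are
// simplified stand-ins for the project's message structs.
func toGeminiContent(role string, parts []part) content {
	out := content{Role: role}
	imageNum := 0
	for _, p := range parts {
		if p.IsImage {
			imageNum++
			if imageNum > geminiVisionMaxImageNum {
				continue // silently drop images beyond the limit
			}
		}
		out.Parts = append(out.Parts, p)
	}
	if out.Role == "assistant" { // Gemini only accepts "user" and "model"
		out.Role = "model"
	}
	return out
}

func main() {
	c := toGeminiContent("assistant", []part{{Text: "hi"}})
	fmt.Println(c.Role, len(c.Parts)) // model 1
}
```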
@@ -10,8 +10,6 @@ import (
"net/http"
"one-api/common"
"one-api/model"
"one-api/relay/channel/openai"
"one-api/relay/util"
"strings"

"github.com/gin-gonic/gin"
@@ -27,7 +25,7 @@ func isWithinRange(element string, value int) bool {
return value >= min && value <= max
}

func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
imageModel := "dall-e-2"
imageSize := "1024x1024"

@@ -37,10 +35,10 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
userId := c.GetInt("id")
group := c.GetString("group")

var imageRequest openai.ImageRequest
var imageRequest ImageRequest
err := common.UnmarshalBodyReusable(c, &imageRequest)
if err != nil {
return openai.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
}

if imageRequest.N == 0 {
@@ -69,24 +67,24 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
}
}
} else {
return openai.ErrorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
return errorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
}

// Prompt validation
if imageRequest.Prompt == "" {
return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
return errorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
}

// Check prompt length
if len(imageRequest.Prompt) > common.DalleImagePromptLengthLimitations[imageModel] {
return openai.ErrorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
return errorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
}

// Number of generated images validation
if isWithinRange(imageModel, imageRequest.N) == false {
// channel not azure
if channelType != common.ChannelTypeAzure {
return openai.ErrorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
}
}

@@ -97,7 +95,7 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
modelMap := make(map[string]string)
err := json.Unmarshal([]byte(modelMapping), &modelMap)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
}
if modelMap[imageModel] != "" {
imageModel = modelMap[imageModel]
@@ -109,10 +107,10 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
if c.GetString("base_url") != "" {
baseURL = c.GetString("base_url")
}
fullRequestURL := util.GetFullRequestURL(baseURL, requestURL, channelType)
fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
if channelType == common.ChannelTypeAzure {
// https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
apiVersion := util.GetAPIVersion(c)
apiVersion := GetAPIVersion(c)
// https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-06-01-preview
fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", baseURL, imageModel, apiVersion)
}
@@ -121,7 +119,7 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
if isModelMapped || channelType == common.ChannelTypeAzure { // make Azure channel request body
jsonStr, err := json.Marshal(imageRequest)
if err != nil {
return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
} else {
@@ -136,12 +134,12 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
quota := int(ratio*imageCostRatio*1000) * imageRequest.N

if userQuota-quota < 0 {
return openai.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
}

req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return openai.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
}
token := c.Request.Header.Get("Authorization")
if channelType == common.ChannelTypeAzure { // Azure authentication
@@ -154,25 +152,22 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
req.Header.Set("Accept", c.Request.Header.Get("Accept"))

resp, err := util.HTTPClient.Do(req)
resp, err := httpClient.Do(req)
if err != nil {
return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}

err = req.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
}
err = c.Request.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
}
var textResponse openai.ImageResponse
var textResponse ImageResponse

defer func(ctx context.Context) {
if resp.StatusCode != http.StatusOK {
return
}
err := model.PostConsumeTokenQuota(tokenId, quota)
if err != nil {
common.SysError("error consuming token remain quota: " + err.Error())
@@ -194,15 +189,15 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
responseBody, err := io.ReadAll(resp.Body)

if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
}
err = json.Unmarshal(responseBody, &textResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
}

resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
@@ -214,11 +209,11 @@ func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode

_, err = io.Copy(c.Writer, resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
}
return nil
}
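The image relay prices a request as `ratio * imageCostRatio * 1000` per image, multiplied by `n`, and rejects it before calling upstream when the user's remaining quota cannot cover the batch. A toy version of that pre-flight check; the 1000 multiplier matches the diff, while the ratio values here are made-up examples, not the project's real pricing:

```go
package main

import (
	"errors"
	"fmt"
)

// checkImageQuota mirrors the pre-flight check in the diff: compute the full
// cost of the batch, then refuse before calling upstream if it cannot be paid.
func checkImageQuota(userQuota int, ratio, imageCostRatio float64, n int) (int, error) {
	quota := int(ratio*imageCostRatio*1000) * n
	if userQuota-quota < 0 {
		return 0, errors.New("user quota is not enough")
	}
	return quota, nil
}

func main() {
	quota, err := checkImageQuota(50000, 2.0, 1.5, 4)
	fmt.Println(quota, err) // 12000 <nil>
}
```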
@@ -1,4 +1,4 @@
package openai
package controller

import (
"bufio"
@@ -8,11 +8,10 @@ import (
"io"
"net/http"
"one-api/common"
"one-api/relay/constant"
"strings"
)

func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWithStatusCode, string) {
func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
@@ -42,7 +41,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWi
data = data[6:]
if !strings.HasPrefix(data, "[DONE]") {
switch relayMode {
case constant.RelayModeChatCompletions:
case RelayModeChatCompletions:
var streamResponse ChatCompletionsStreamResponse
err := json.Unmarshal([]byte(data), &streamResponse)
if err != nil {
@@ -52,7 +51,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWi
for _, choice := range streamResponse.Choices {
responseText += choice.Delta.Content
}
case constant.RelayModeCompletions:
case RelayModeCompletions:
var streamResponse CompletionsStreamResponse
err := json.Unmarshal([]byte(data), &streamResponse)
if err != nil {
@@ -67,7 +66,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWi
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
@@ -84,29 +83,29 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWi
})
err := resp.Body.Close()
if err != nil {
return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}

func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*ErrorWithStatusCode, *Usage) {
var textResponse SlimTextResponse
func openaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
var textResponse TextResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
err = json.Unmarshal(responseBody, &textResponse)
if err != nil {
return ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if textResponse.Error.Type != "" {
return &ErrorWithStatusCode{
Error: textResponse.Error,
StatusCode: resp.StatusCode,
return &OpenAIErrorWithStatusCode{
OpenAIError: textResponse.Error,
StatusCode: resp.StatusCode,
}, nil
}
// Reset response body
@@ -114,7 +113,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string

// We shouldn't set the header before we parse the response body, because the parse part may fail.
// And then we will have to send an error response, but in this case, the header has already been set.
// So the HTTPClient will be confused by the response.
// So the httpClient will be confused by the response.
// For example, Postman will report error, and we cannot check the response at all.
for k, v := range resp.Header {
c.Writer.Header().Set(k, v[0])
@@ -122,17 +121,17 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
c.Writer.WriteHeader(resp.StatusCode)
_, err = io.Copy(c.Writer, resp.Body)
if err != nil {
return ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

if textResponse.Usage.TotalTokens == 0 {
completionTokens := 0
for _, choice := range textResponse.Choices {
completionTokens += CountTokenText(choice.Message.StringContent(), model)
completionTokens += countTokenText(choice.Message.StringContent(), model)
}
textResponse.Usage = Usage{
PromptTokens: promptTokens,
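Both versions of the OpenAI stream handler cut the upstream body into SSE events with a custom `bufio.Scanner` split function, then strip the `data: ` prefix (`data = data[6:]`) and stop at `[DONE]`. The hunks only show the split function's opening line; the sketch below is a plausible standalone reconstruction, assuming the project splits on single newlines:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	body := strings.NewReader("data: {\"a\":1}\ndata: [DONE]\n")
	scanner := bufio.NewScanner(body)
	// Split on single newlines so each SSE "data:" line becomes one token;
	// this mirrors the split visible at the top of the stream handler
	// (the newline-delimited form is an assumption, not shown in the diff).
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		payload := line[6:] // strip the "data: " prefix, as the handler does
		if strings.HasPrefix(payload, "[DONE]") {
			break
		}
		fmt.Println("event:", payload)
	}
}
```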
@@ -1,4 +1,4 @@
package google
package controller

import (
"encoding/json"
@@ -7,14 +7,47 @@ import (
"io"
"net/http"
"one-api/common"
"one-api/relay/channel/openai"
"one-api/relay/constant"
)

// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body

func ConvertPaLMRequest(textRequest openai.GeneralOpenAIRequest) *PaLMChatRequest {
type PaLMChatMessage struct {
Author string `json:"author"`
Content string `json:"content"`
}

type PaLMFilter struct {
Reason string `json:"reason"`
Message string `json:"message"`
}

type PaLMPrompt struct {
Messages []PaLMChatMessage `json:"messages"`
}

type PaLMChatRequest struct {
Prompt PaLMPrompt `json:"prompt"`
Temperature float64 `json:"temperature,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK int `json:"topK,omitempty"`
}

type PaLMError struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}

type PaLMChatResponse struct {
Candidates []PaLMChatMessage `json:"candidates"`
Messages []Message `json:"messages"`
Filters []PaLMFilter `json:"filters"`
Error PaLMError `json:"error"`
}

func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest {
palmRequest := PaLMChatRequest{
Prompt: PaLMPrompt{
Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)),
@@ -38,14 +71,14 @@ func ConvertPaLMRequest(textRequest openai.GeneralOpenAIRequest) *PaLMChatReques
return &palmRequest
}

func responsePaLM2OpenAI(response *PaLMChatResponse) *openai.TextResponse {
fullTextResponse := openai.TextResponse{
Choices: make([]openai.TextResponseChoice, 0, len(response.Candidates)),
func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse {
fullTextResponse := OpenAITextResponse{
Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
}
for i, candidate := range response.Candidates {
choice := openai.TextResponseChoice{
choice := OpenAITextResponseChoice{
Index: i,
Message: openai.Message{
Message: Message{
Role: "assistant",
Content: candidate.Content,
},
@@ -56,20 +89,20 @@ func responsePaLM2OpenAI(response *PaLMChatResponse) *openai.TextResponse {
return &fullTextResponse
}

func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsStreamResponse {
var choice ChatCompletionsStreamResponseChoice
if len(palmResponse.Candidates) > 0 {
choice.Delta.Content = palmResponse.Candidates[0].Content
}
choice.FinishReason = &constant.StopFinishReason
var response openai.ChatCompletionsStreamResponse
choice.FinishReason = &stopFinishReason
var response ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = "palm2"
response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
response.Choices = []ChatCompletionsStreamResponseChoice{choice}
return &response
}

func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
createdTime := common.GetTimestamp()
@@ -110,7 +143,7 @@ func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSt
dataChan <- string(jsonResponse)
stopChan <- true
}()
common.SetEventStreamHeaders(c)
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
@@ -123,28 +156,28 @@ func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSt
})
err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}

func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var palmResponse PaLMChatResponse
err = json.Unmarshal(responseBody, &palmResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &OpenAIErrorWithStatusCode{
OpenAIError: OpenAIError{
Message: palmResponse.Error.Message,
Type: palmResponse.Error.Status,
Param: "",
@@ -154,9 +187,8 @@ func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
}, nil
}
fullTextResponse := responsePaLM2OpenAI(&palmResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(palmResponse.Candidates[0].Content, model)
usage := openai.Usage{
completionTokens := countTokenText(palmResponse.Candidates[0].Content, model)
usage := Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
@@ -164,7 +196,7 @@ func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
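The PaLM conversion is the simplest of the adapters: each PaLM candidate becomes one OpenAI choice with the assistant role, indexed in order. A compact sketch of `responsePaLM2OpenAI`'s loop, with simplified local types standing in for the project's:

```go
package main

import "fmt"

type palmCandidate struct{ Author, Content string }

type choice struct {
	Index   int
	Role    string
	Content string
}

// candidatesToChoices mirrors responsePaLM2OpenAI's loop: one candidate in,
// one assistant-role choice out. Types here are simplified stand-ins.
func candidatesToChoices(cands []palmCandidate) []choice {
	choices := make([]choice, 0, len(cands))
	for i, c := range cands {
		choices = append(choices, choice{Index: i, Role: "assistant", Content: c.Content})
	}
	return choices
}

func main() {
	out := candidatesToChoices([]palmCandidate{{Author: "1", Content: "hello"}})
	fmt.Println(out[0].Role, out[0].Content) // assistant hello
}
```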
@@ -1,4 +1,4 @@
|
||||
package tencent
|
||||
package controller
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -12,8 +12,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/relay/channel/openai"
|
||||
"one-api/relay/constant"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -21,22 +19,80 @@ import (
|
||||
|
||||
// https://cloud.tencent.com/document/product/1729/97732
|
||||
|
||||
func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
|
||||
messages := make([]Message, 0, len(request.Messages))
|
||||
type TencentMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content"`
|
||||
}
|
||||
|
||||
type TencentChatRequest struct {
|
||||
AppId int64 `json:"app_id"` // 腾讯云账号的 APPID
|
||||
SecretId string `json:"secret_id"` // 官网 SecretId
|
||||
// Timestamp当前 UNIX 时间戳,单位为秒,可记录发起 API 请求的时间。
|
||||
// 例如1529223702,如果与当前时间相差过大,会引起签名过期错误
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
// Expired 签名的有效期,是一个符合 UNIX Epoch 时间戳规范的数值,
|
||||
// 单位为秒;Expired 必须大于 Timestamp 且 Expired-Timestamp 小于90天
|
||||
Expired int64 `json:"expired"`
|
||||
QueryID string `json:"query_id"` //请求 Id,用于问题排查
|
||||
// Temperature 较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定
|
||||
// 默认 1.0,取值区间为[0.0,2.0],非必要不建议使用,不合理的取值会影响效果
|
||||
// 建议该参数和 top_p 只设置1个,不要同时更改 top_p
|
||||
Temperature float64 `json:"temperature"`
|
||||
// TopP 影响输出文本的多样性,取值越大,生成文本的多样性越强
|
||||
// 默认1.0,取值区间为[0.0, 1.0],非必要不建议使用, 不合理的取值会影响效果
|
||||
// 建议该参数和 temperature 只设置1个,不要同时更改
|
||||
TopP float64 `json:"top_p"`
|
||||
// Stream 0:同步,1:流式 (默认,协议:SSE)
|
||||
// 同步请求超时:60s,如果内容较长建议使用流式
|
||||
Stream int `json:"stream"`
|
||||
// Messages 会话内容, 长度最多为40, 按对话时间从旧到新在数组中排列
|
||||
// 输入 content 总数最大支持 3000 token。
|
||||
Messages []TencentMessage `json:"messages"`
|
||||
}
|
||||
|
||||
type TencentError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type TencentUsage struct {
|
||||
InputTokens int `json:"input_tokens"`
|
||||
OutputTokens int `json:"output_tokens"`
|
||||
TotalTokens int `json:"total_tokens"`
|
||||
}
|
||||
|
||||
type TencentResponseChoices struct {
|
||||
FinishReason string `json:"finish_reason,omitempty"` // 流式结束标志位,为 stop 则表示尾包
|
||||
Messages TencentMessage `json:"messages,omitempty"` // 内容,同步模式返回内容,流模式为 null 输出 content 内容总数最多支持 1024token。
|
||||
Delta TencentMessage `json:"delta,omitempty"` // 内容,流模式返回内容,同步模式为 null 输出 content 内容总数最多支持 1024token。
|
||||
}
|
||||
|
||||
type TencentChatResponse struct {
|
||||
Choices []TencentResponseChoices `json:"choices,omitempty"` // 结果
|
||||
Created string `json:"created,omitempty"` // unix 时间戳的字符串
|
||||
Id string `json:"id,omitempty"` // 会话 id
|
||||
Usage Usage `json:"usage,omitempty"` // token 数量
|
||||
Error TencentError `json:"error,omitempty"` // 错误信息 注意:此字段可能返回 null,表示取不到有效值
|
||||
Note string `json:"note,omitempty"` // 注释
|
||||
ReqID string `json:"req_id,omitempty"` // 唯一请求 Id,每次请求都会返回。用于反馈接口入参
|
||||
}
|
||||
|
||||
func requestOpenAI2Tencent(request GeneralOpenAIRequest) *TencentChatRequest {
|
||||
messages := make([]TencentMessage, 0, len(request.Messages))
|
||||
for i := 0; i < len(request.Messages); i++ {
|
||||
message := request.Messages[i]
|
||||
if message.Role == "system" {
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, TencentMessage{
|
||||
Role: "user",
|
||||
Content: message.StringContent(),
|
||||
})
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, TencentMessage{
|
||||
Role: "assistant",
|
||||
Content: "Okay",
|
||||
})
|
||||
continue
|
||||
}
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, TencentMessage{
|
||||
Content: message.StringContent(),
|
||||
Role: message.Role,
|
||||
})
|
||||
@@ -45,7 +101,7 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
|
||||
if request.Stream {
|
||||
stream = 1
|
||||
}
|
||||
return &ChatRequest{
|
||||
return &TencentChatRequest{
|
||||
Timestamp: common.GetTimestamp(),
|
||||
Expired: common.GetTimestamp() + 24*60*60,
|
||||
QueryID: common.GetUUID(),
|
||||
@@ -56,16 +112,16 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
|
||||
}
|
||||
}

func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
    fullTextResponse := openai.TextResponse{
func responseTencent2OpenAI(response *TencentChatResponse) *OpenAITextResponse {
    fullTextResponse := OpenAITextResponse{
        Object:  "chat.completion",
        Created: common.GetTimestamp(),
        Usage:   response.Usage,
    }
    if len(response.Choices) > 0 {
        choice := openai.TextResponseChoice{
        choice := OpenAITextResponseChoice{
            Index: 0,
            Message: openai.Message{
            Message: Message{
                Role:    "assistant",
                Content: response.Choices[0].Messages.Content,
            },
@@ -76,24 +132,24 @@ func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
    return &fullTextResponse
}

func streamResponseTencent2OpenAI(TencentResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
    response := openai.ChatCompletionsStreamResponse{
func streamResponseTencent2OpenAI(TencentResponse *TencentChatResponse) *ChatCompletionsStreamResponse {
    response := ChatCompletionsStreamResponse{
        Object:  "chat.completion.chunk",
        Created: common.GetTimestamp(),
        Model:   "tencent-hunyuan",
    }
    if len(TencentResponse.Choices) > 0 {
        var choice openai.ChatCompletionsStreamResponseChoice
        var choice ChatCompletionsStreamResponseChoice
        choice.Delta.Content = TencentResponse.Choices[0].Delta.Content
        if TencentResponse.Choices[0].FinishReason == "stop" {
            choice.FinishReason = &constant.StopFinishReason
            choice.FinishReason = &stopFinishReason
        }
        response.Choices = append(response.Choices, choice)
    }
    return &response
}

func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func tencentStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
    var responseText string
    scanner := bufio.NewScanner(resp.Body)
    scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
@@ -124,11 +180,11 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
        }
        stopChan <- true
    }()
    common.SetEventStreamHeaders(c)
    setEventStreamHeaders(c)
    c.Stream(func(w io.Writer) bool {
        select {
        case data := <-dataChan:
            var TencentResponse ChatResponse
            var TencentResponse TencentChatResponse
            err := json.Unmarshal([]byte(data), &TencentResponse)
            if err != nil {
                common.SysError("error unmarshalling stream response: " + err.Error())
@@ -152,28 +208,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
    })
    err := resp.Body.Close()
    if err != nil {
        return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
    }
    return nil, responseText
}

func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
    var TencentResponse ChatResponse
func tencentHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
    var TencentResponse TencentChatResponse
    responseBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
    }
    err = resp.Body.Close()
    if err != nil {
        return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
    }
    err = json.Unmarshal(responseBody, &TencentResponse)
    if err != nil {
        return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
    }
    if TencentResponse.Error.Code != 0 {
        return &openai.ErrorWithStatusCode{
            Error: openai.Error{
        return &OpenAIErrorWithStatusCode{
            OpenAIError: OpenAIError{
                Message: TencentResponse.Error.Message,
                Code:    TencentResponse.Error.Code,
            },
@@ -181,10 +237,9 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
        }, nil
    }
    fullTextResponse := responseTencent2OpenAI(&TencentResponse)
    fullTextResponse.Model = "hunyuan"
    jsonResponse, err := json.Marshal(fullTextResponse)
    if err != nil {
        return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
    }
    c.Writer.Header().Set("Content-Type", "application/json")
    c.Writer.WriteHeader(resp.StatusCode)
@@ -192,7 +247,7 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
    return nil, &fullTextResponse.Usage
}

func ParseConfig(config string) (appId int64, secretId string, secretKey string, err error) {
func parseTencentConfig(config string) (appId int64, secretId string, secretKey string, err error) {
    parts := strings.Split(config, "|")
    if len(parts) != 3 {
        err = errors.New("invalid tencent config")
@@ -204,7 +259,7 @@ func ParseConfig(config string) (appId int64, secretId string, secretKey string,
    return
}
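parseTencentConfig splits the channel key on "|" into the three Tencent credentials. A usage sketch with placeholder values:

// The channel key carries all three Tencent credentials in one string.
appId, secretId, secretKey, err := parseTencentConfig("1300000000|AKIDxxxxxxxx|secretxxxxxxxx") // placeholder values
if err != nil {
    // err is "invalid tencent config" whenever the key does not split into exactly 3 parts
    return errorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError)
}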

func GetSign(req ChatRequest, secretKey string) string {
func getTencentSign(req TencentChatRequest, secretKey string) string {
    params := make([]string, 0)
    params = append(params, "app_id="+strconv.FormatInt(req.AppId, 10))
    params = append(params, "secret_id="+req.SecretId)
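The hunk ends here, so the remainder of getTencentSign is not shown. Under the usual Hunyuan v1 scheme the collected key=value pairs (the remaining fields — timestamp, expired, query_id, temperature, top_p, stream, messages — are appended the same way) are sorted, joined into a query string behind the endpoint, and signed with HMAC-SHA1; a sketch of that tail, as an assumption rather than a quote of this diff:

    // Sketch: sort the params, sign endpoint+query with HMAC-SHA1, base64-encode.
    // Requires "crypto/hmac", "crypto/sha1", "encoding/base64", "sort", "strings".
    sort.Strings(params)
    signURL := "hunyuan.cloud.tencent.com/hyllm/v1/chat/completions?" + strings.Join(params, "&")
    mac := hmac.New(sha1.New, []byte(secretKey))
    mac.Write([]byte(signURL))
    return base64.StdEncoding.EncodeToString(mac.Sum(nil))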
@@ -6,24 +6,15 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "github.com/gin-gonic/gin"
    "io"
    "math"
    "net/http"
    "one-api/common"
    "one-api/model"
    "one-api/relay/channel/aiproxy"
    "one-api/relay/channel/ali"
    "one-api/relay/channel/anthropic"
    "one-api/relay/channel/baidu"
    "one-api/relay/channel/google"
    "one-api/relay/channel/openai"
    "one-api/relay/channel/tencent"
    "one-api/relay/channel/xunfei"
    "one-api/relay/channel/zhipu"
    "one-api/relay/constant"
    "one-api/relay/util"
    "strings"
    "time"

    "github.com/gin-gonic/gin"
)

const (
@@ -39,47 +30,64 @@ const (
    APITypeGemini
)

func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
var httpClient *http.Client
var impatientHTTPClient *http.Client

func init() {
    if common.RelayTimeout == 0 {
        httpClient = &http.Client{}
    } else {
        httpClient = &http.Client{
            Timeout: time.Duration(common.RelayTimeout) * time.Second,
        }
    }

    impatientHTTPClient = &http.Client{
        Timeout: 5 * time.Second,
    }
}

func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
    channelType := c.GetInt("channel")
    channelId := c.GetInt("channel_id")
    tokenId := c.GetInt("token_id")
    userId := c.GetInt("id")
    group := c.GetString("group")
    var textRequest openai.GeneralOpenAIRequest
    var textRequest GeneralOpenAIRequest
    err := common.UnmarshalBodyReusable(c, &textRequest)
    if err != nil {
        return openai.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
        return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
    }
    if textRequest.MaxTokens < 0 || textRequest.MaxTokens > math.MaxInt32/2 {
        return openai.ErrorWrapper(errors.New("max_tokens is invalid"), "invalid_max_tokens", http.StatusBadRequest)
        return errorWrapper(errors.New("max_tokens is invalid"), "invalid_max_tokens", http.StatusBadRequest)
    }
    if relayMode == constant.RelayModeModerations && textRequest.Model == "" {
    if relayMode == RelayModeModerations && textRequest.Model == "" {
        textRequest.Model = "text-moderation-latest"
    }
    if relayMode == constant.RelayModeEmbeddings && textRequest.Model == "" {
    if relayMode == RelayModeEmbeddings && textRequest.Model == "" {
        textRequest.Model = c.Param("model")
    }
    // request validation
    if textRequest.Model == "" {
        return openai.ErrorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)
        return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest)
    }
    switch relayMode {
    case constant.RelayModeCompletions:
    case RelayModeCompletions:
        if textRequest.Prompt == "" {
            return openai.ErrorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
            return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
        }
    case constant.RelayModeChatCompletions:
    case RelayModeChatCompletions:
        if textRequest.Messages == nil || len(textRequest.Messages) == 0 {
            return openai.ErrorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
            return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
        }
    case constant.RelayModeEmbeddings:
    case constant.RelayModeModerations:
    case RelayModeEmbeddings:
    case RelayModeModerations:
        if textRequest.Input == "" {
            return openai.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
            return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
        }
    case constant.RelayModeEdits:
    case RelayModeEdits:
        if textRequest.Instruction == "" {
            return openai.ErrorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
            return errorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
        }
    }
    // map model name
@@ -89,7 +97,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        modelMap := make(map[string]string)
        err := json.Unmarshal([]byte(modelMapping), &modelMap)
        if err != nil {
            return openai.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
            return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
        }
        if modelMap[textRequest.Model] != "" {
            textRequest.Model = modelMap[textRequest.Model]
@@ -122,12 +130,12 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    if c.GetString("base_url") != "" {
        baseURL = c.GetString("base_url")
    }
    fullRequestURL := util.GetFullRequestURL(baseURL, requestURL, channelType)
    fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
    switch apiType {
    case APITypeOpenAI:
        if channelType == common.ChannelTypeAzure {
            // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
            apiVersion := util.GetAPIVersion(c)
            apiVersion := GetAPIVersion(c)
            requestURL := strings.Split(requestURL, "?")[0]
            requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
            baseURL = c.GetString("base_url")
@@ -140,7 +148,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            model_ = strings.TrimSuffix(model_, "-0613")

            requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
            fullRequestURL = util.GetFullRequestURL(baseURL, requestURL, channelType)
            fullRequestURL = getFullRequestURL(baseURL, requestURL, channelType)
        }
    case APITypeClaude:
        fullRequestURL = "https://api.anthropic.com/v1/complete"
@@ -163,8 +171,8 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        var err error
        if apiKey, err = baidu.GetAccessToken(apiKey); err != nil {
            return openai.ErrorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError)
        if apiKey, err = getBaiduAccessToken(apiKey); err != nil {
            return errorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError)
        }
        fullRequestURL += "?access_token=" + apiKey
    case APITypePaLM:
@@ -172,6 +180,9 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        if baseURL != "" {
            fullRequestURL = fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", baseURL)
        }
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        fullRequestURL += "?key=" + apiKey
    case APITypeGemini:
        requestBaseURL := "https://generativelanguage.googleapis.com"
        if baseURL != "" {
@@ -186,6 +197,9 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            action = "streamGenerateContent"
        }
        fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        fullRequestURL += "?key=" + apiKey
    case APITypeZhipu:
        method := "invoke"
        if textRequest.Stream {
@@ -194,7 +208,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
    case APITypeAli:
        fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
        if relayMode == constant.RelayModeEmbeddings {
        if relayMode == RelayModeEmbeddings {
            fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
        }
    case APITypeTencent:
@@ -205,12 +219,12 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    var promptTokens int
    var completionTokens int
    switch relayMode {
    case constant.RelayModeChatCompletions:
        promptTokens = openai.CountTokenMessages(textRequest.Messages, textRequest.Model)
    case constant.RelayModeCompletions:
        promptTokens = openai.CountTokenInput(textRequest.Prompt, textRequest.Model)
    case constant.RelayModeModerations:
        promptTokens = openai.CountTokenInput(textRequest.Input, textRequest.Model)
    case RelayModeChatCompletions:
        promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
    case RelayModeCompletions:
        promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
    case RelayModeModerations:
        promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
    }
    preConsumedTokens := common.PreConsumedQuota
    if textRequest.MaxTokens != 0 {
@@ -222,14 +236,14 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
    userQuota, err := model.CacheGetUserQuota(userId)
    if err != nil {
        return openai.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
        return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota-preConsumedQuota < 0 {
        return openai.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
        return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
    }
    err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
    if err != nil {
        return openai.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
        return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
    }
    if userQuota > 100*preConsumedQuota {
        // in this case, we do not pre-consume quota
@@ -240,14 +254,14 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    if preConsumedQuota > 0 {
        err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
        if err != nil {
            return openai.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
        }
    }
    var requestBody io.Reader
    if isModelMapped {
        jsonStr, err := json.Marshal(textRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    } else {
@@ -255,86 +269,86 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    }
    switch apiType {
    case APITypeClaude:
        claudeRequest := anthropic.ConvertRequest(textRequest)
        claudeRequest := requestOpenAI2Claude(textRequest)
        jsonStr, err := json.Marshal(claudeRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeBaidu:
        var jsonData []byte
        var err error
        switch relayMode {
        case constant.RelayModeEmbeddings:
            baiduEmbeddingRequest := baidu.ConvertEmbeddingRequest(textRequest)
        case RelayModeEmbeddings:
            baiduEmbeddingRequest := embeddingRequestOpenAI2Baidu(textRequest)
            jsonData, err = json.Marshal(baiduEmbeddingRequest)
        default:
            baiduRequest := baidu.ConvertRequest(textRequest)
            baiduRequest := requestOpenAI2Baidu(textRequest)
            jsonData, err = json.Marshal(baiduRequest)
        }
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonData)
    case APITypePaLM:
        palmRequest := google.ConvertPaLMRequest(textRequest)
        palmRequest := requestOpenAI2PaLM(textRequest)
        jsonStr, err := json.Marshal(palmRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeGemini:
        geminiChatRequest := google.ConvertGeminiRequest(textRequest)
        geminiChatRequest := requestOpenAI2Gemini(textRequest)
        jsonStr, err := json.Marshal(geminiChatRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeZhipu:
        zhipuRequest := zhipu.ConvertRequest(textRequest)
        zhipuRequest := requestOpenAI2Zhipu(textRequest)
        jsonStr, err := json.Marshal(zhipuRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeAli:
        var jsonStr []byte
        var err error
        switch relayMode {
        case constant.RelayModeEmbeddings:
            aliEmbeddingRequest := ali.ConvertEmbeddingRequest(textRequest)
        case RelayModeEmbeddings:
            aliEmbeddingRequest := embeddingRequestOpenAI2Ali(textRequest)
            jsonStr, err = json.Marshal(aliEmbeddingRequest)
        default:
            aliRequest := ali.ConvertRequest(textRequest)
            aliRequest := requestOpenAI2Ali(textRequest)
            jsonStr, err = json.Marshal(aliRequest)
        }
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeTencent:
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
        appId, secretId, secretKey, err := tencent.ParseConfig(apiKey)
        appId, secretId, secretKey, err := parseTencentConfig(apiKey)
        if err != nil {
            return openai.ErrorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError)
            return errorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError)
        }
        tencentRequest := tencent.ConvertRequest(textRequest)
        tencentRequest := requestOpenAI2Tencent(textRequest)
        tencentRequest.AppId = appId
        tencentRequest.SecretId = secretId
        jsonStr, err := json.Marshal(tencentRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        sign := tencent.GetSign(*tencentRequest, secretKey)
        sign := getTencentSign(*tencentRequest, secretKey)
        c.Request.Header.Set("Authorization", sign)
        requestBody = bytes.NewBuffer(jsonStr)
    case APITypeAIProxyLibrary:
        aiProxyLibraryRequest := aiproxy.ConvertRequest(textRequest)
        aiProxyLibraryRequest := requestOpenAI2AIProxyLibrary(textRequest)
        aiProxyLibraryRequest.LibraryId = c.GetString("library_id")
        jsonStr, err := json.Marshal(aiProxyLibraryRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
        }
        requestBody = bytes.NewBuffer(jsonStr)
    }
@@ -346,7 +360,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    if apiType != APITypeXunfei { // because xunfei uses websocket
        req, err = http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
        if err != nil {
            return openai.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
        }
        apiKey := c.Request.Header.Get("Authorization")
        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
@@ -369,7 +383,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            }
            req.Header.Set("anthropic-version", anthropicVersion)
        case APITypeZhipu:
            token := zhipu.GetToken(apiKey)
            token := getZhipuToken(apiKey)
            req.Header.Set("Authorization", token)
        case APITypeAli:
            req.Header.Set("Authorization", "Bearer "+apiKey)
@@ -382,9 +396,9 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        case APITypeTencent:
            req.Header.Set("Authorization", apiKey)
        case APITypePaLM:
            req.Header.Set("x-goog-api-key", apiKey)
            // do not set Authorization header
        case APITypeGemini:
            req.Header.Set("x-goog-api-key", apiKey)
            // do not set Authorization header
        default:
            req.Header.Set("Authorization", "Bearer "+apiKey)
        }
@@ -394,17 +408,17 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            req.Header.Set("Accept", "text/event-stream")
        }
        //req.Header.Set("Connection", c.Request.Header.Get("Connection"))
        resp, err = util.HTTPClient.Do(req)
        resp, err = httpClient.Do(req)
        if err != nil {
            return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
            return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
        }
        err = req.Body.Close()
        if err != nil {
            return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
            return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
        }
        err = c.Request.Body.Close()
        if err != nil {
            return openai.ErrorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
            return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
        }
        isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")

@@ -418,11 +432,11 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
                }
            }(c.Request.Context())
            }
            return util.RelayErrorHandler(resp)
            return relayErrorHandler(resp)
        }
    }

    var textResponse openai.SlimTextResponse
    var textResponse TextResponse
    tokenName := c.GetString("token_name")

    defer func(ctx context.Context) {
@@ -463,15 +477,15 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
    switch apiType {
    case APITypeOpenAI:
        if isStream {
            err, responseText := openai.StreamHandler(c, resp, relayMode)
            err, responseText := openaiStreamHandler(c, resp, relayMode)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := openai.Handler(c, resp, promptTokens, textRequest.Model)
            err, usage := openaiHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
@@ -482,15 +496,15 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeClaude:
        if isStream {
            err, responseText := anthropic.StreamHandler(c, resp)
            err, responseText := claudeStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := anthropic.Handler(c, resp, promptTokens, textRequest.Model)
            err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
@@ -501,7 +515,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeBaidu:
        if isStream {
            err, usage := baidu.StreamHandler(c, resp)
            err, usage := baiduStreamHandler(c, resp)
            if err != nil {
                return err
            }
@@ -510,13 +524,13 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            }
            return nil
        } else {
            var err *openai.ErrorWithStatusCode
            var usage *openai.Usage
            var err *OpenAIErrorWithStatusCode
            var usage *Usage
            switch relayMode {
            case constant.RelayModeEmbeddings:
                err, usage = baidu.EmbeddingHandler(c, resp)
            case RelayModeEmbeddings:
                err, usage = baiduEmbeddingHandler(c, resp)
            default:
                err, usage = baidu.Handler(c, resp)
                err, usage = baiduHandler(c, resp)
            }
            if err != nil {
                return err
@@ -528,15 +542,15 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypePaLM:
        if textRequest.Stream { // PaLM2 API does not support stream
            err, responseText := google.PaLMStreamHandler(c, resp)
            err, responseText := palmStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := google.PaLMHandler(c, resp, promptTokens, textRequest.Model)
            err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
@@ -547,15 +561,15 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeGemini:
        if textRequest.Stream {
            err, responseText := google.StreamHandler(c, resp)
            err, responseText := geminiChatStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := google.GeminiHandler(c, resp, promptTokens, textRequest.Model)
            err, usage := geminiChatHandler(c, resp, promptTokens, textRequest.Model)
            if err != nil {
                return err
            }
@@ -566,7 +580,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeZhipu:
        if isStream {
            err, usage := zhipu.StreamHandler(c, resp)
            err, usage := zhipuStreamHandler(c, resp)
            if err != nil {
                return err
            }
@@ -577,7 +591,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
            return nil
        } else {
            err, usage := zhipu.Handler(c, resp)
            err, usage := zhipuHandler(c, resp)
            if err != nil {
                return err
            }
@@ -590,7 +604,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeAli:
        if isStream {
            err, usage := ali.StreamHandler(c, resp)
            err, usage := aliStreamHandler(c, resp)
            if err != nil {
                return err
            }
@@ -599,13 +613,13 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            }
            return nil
        } else {
            var err *openai.ErrorWithStatusCode
            var usage *openai.Usage
            var err *OpenAIErrorWithStatusCode
            var usage *Usage
            switch relayMode {
            case constant.RelayModeEmbeddings:
                err, usage = ali.EmbeddingHandler(c, resp)
            case RelayModeEmbeddings:
                err, usage = aliEmbeddingHandler(c, resp)
            default:
                err, usage = ali.Handler(c, resp)
                err, usage = aliHandler(c, resp)
            }
            if err != nil {
                return err
@@ -620,14 +634,14 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        auth = strings.TrimPrefix(auth, "Bearer ")
        splits := strings.Split(auth, "|")
        if len(splits) != 3 {
            return openai.ErrorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
            return errorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
        }
        var err *openai.ErrorWithStatusCode
        var usage *openai.Usage
        var err *OpenAIErrorWithStatusCode
        var usage *Usage
        if isStream {
            err, usage = xunfei.StreamHandler(c, textRequest, splits[0], splits[1], splits[2])
            err, usage = xunfeiStreamHandler(c, textRequest, splits[0], splits[1], splits[2])
        } else {
            err, usage = xunfei.Handler(c, textRequest, splits[0], splits[1], splits[2])
            err, usage = xunfeiHandler(c, textRequest, splits[0], splits[1], splits[2])
        }
        if err != nil {
            return err
@@ -638,7 +652,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        return nil
    case APITypeAIProxyLibrary:
        if isStream {
            err, usage := aiproxy.StreamHandler(c, resp)
            err, usage := aiProxyLibraryStreamHandler(c, resp)
            if err != nil {
                return err
            }
@@ -647,7 +661,7 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            }
            return nil
        } else {
            err, usage := aiproxy.Handler(c, resp)
            err, usage := aiProxyLibraryHandler(c, resp)
            if err != nil {
                return err
            }
@@ -658,15 +672,15 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
        }
    case APITypeTencent:
        if isStream {
            err, responseText := tencent.StreamHandler(c, resp)
            err, responseText := tencentStreamHandler(c, resp)
            if err != nil {
                return err
            }
            textResponse.Usage.PromptTokens = promptTokens
            textResponse.Usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
            textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
            return nil
        } else {
            err, usage := tencent.Handler(c, resp)
            err, usage := tencentHandler(c, resp)
            if err != nil {
                return err
            }
@@ -676,6 +690,6 @@ func RelayTextHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode
            return nil
        }
    default:
        return openai.ErrorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
        return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
    }
}
@@ -1,15 +1,25 @@
package openai
package controller

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/pkoukk/tiktoken-go"
    "io"
    "math"
    "net/http"
    "one-api/common"
    "one-api/common/image"
    "one-api/model"
    "strconv"
    "strings"

    "github.com/gin-gonic/gin"
    "github.com/pkoukk/tiktoken-go"
)

var stopFinishReason = "stop"

// tokenEncoderMap won't grow after initialization
var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
var defaultTokenEncoder *tiktoken.Tiktoken
@@ -61,7 +71,7 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
    return len(tokenEncoder.Encode(text, nil, nil))
}

func CountTokenMessages(messages []Message, model string) int {
func countTokenMessages(messages []Message, model string) int {
    tokenEncoder := getTokenEncoder(model)
    // Reference:
    // https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
@@ -185,21 +195,191 @@ func countImageTokens(url string, detail string) (_ int, err error) {
    }
}

func CountTokenInput(input any, model string) int {
func countTokenInput(input any, model string) int {
    switch v := input.(type) {
    case string:
        return CountTokenText(v, model)
        return countTokenText(v, model)
    case []string:
        text := ""
        for _, s := range v {
            text += s
        }
        return CountTokenText(text, model)
        return countTokenText(text, model)
    }
    return 0
}

func CountTokenText(text string, model string) int {
func countTokenText(text string, model string) int {
    tokenEncoder := getTokenEncoder(model)
    return getTokenNum(tokenEncoder, text)
}
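countTokenText delegates to tiktoken-go via getTokenEncoder/getTokenNum. The underlying library calls, as a standalone sketch:

// Sketch: direct tiktoken-go usage, equivalent to the helpers above.
enc, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
if err != nil {
    enc = defaultTokenEncoder // getTokenEncoder falls back like this on failure
}
n := len(enc.Encode("Hello, world!", nil, nil)) // token count for the text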

func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
    openAIError := OpenAIError{
        Message: err.Error(),
        Type:    "one_api_error",
        Code:    code,
    }
    return &OpenAIErrorWithStatusCode{
        OpenAIError: openAIError,
        StatusCode:  statusCode,
    }
}

func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
    if !common.AutomaticDisableChannelEnabled {
        return false
    }
    if err == nil {
        return false
    }
    if statusCode == http.StatusUnauthorized {
        return true
    }
    if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
        return true
    }
    return false
}
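For instance, an upstream 401 or an invalid_api_key error code marks the channel for automatic disabling once the feature flag is on (a sketch; the OpenAIError literal assumes its Code field accepts a string):

disable := shouldDisableChannel(&OpenAIError{Code: "invalid_api_key"}, http.StatusOK)
// disable == true when common.AutomaticDisableChannelEnabled is set; a bare
// 401 (http.StatusUnauthorized) disables the channel regardless of the code.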

func shouldEnableChannel(err error, openAIErr *OpenAIError) bool {
    if !common.AutomaticEnableChannelEnabled {
        return false
    }
    if err != nil {
        return false
    }
    if openAIErr != nil {
        return false
    }
    return true
}

func setEventStreamHeaders(c *gin.Context) {
    c.Writer.Header().Set("Content-Type", "text/event-stream")
    c.Writer.Header().Set("Cache-Control", "no-cache")
    c.Writer.Header().Set("Connection", "keep-alive")
    c.Writer.Header().Set("Transfer-Encoding", "chunked")
    c.Writer.Header().Set("X-Accel-Buffering", "no")
}

type GeneralErrorResponse struct {
    Error    OpenAIError `json:"error"`
    Message  string      `json:"message"`
    Msg      string      `json:"msg"`
    Err      string      `json:"err"`
    ErrorMsg string      `json:"error_msg"`
    Header   struct {
        Message string `json:"message"`
    } `json:"header"`
    Response struct {
        Error struct {
            Message string `json:"message"`
        } `json:"error"`
    } `json:"response"`
}

func (e GeneralErrorResponse) ToMessage() string {
    if e.Error.Message != "" {
        return e.Error.Message
    }
    if e.Message != "" {
        return e.Message
    }
    if e.Msg != "" {
        return e.Msg
    }
    if e.Err != "" {
        return e.Err
    }
    if e.ErrorMsg != "" {
        return e.ErrorMsg
    }
    if e.Header.Message != "" {
        return e.Header.Message
    }
    if e.Response.Error.Message != "" {
        return e.Response.Error.Message
    }
    return ""
}

func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
    openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
        StatusCode: resp.StatusCode,
        OpenAIError: OpenAIError{
            Message: "",
            Type:    "upstream_error",
            Code:    "bad_response_status_code",
            Param:   strconv.Itoa(resp.StatusCode),
        },
    }
    responseBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return
    }
    err = resp.Body.Close()
    if err != nil {
        return
    }
    var errResponse GeneralErrorResponse
    err = json.Unmarshal(responseBody, &errResponse)
    if err != nil {
        return
    }
    if errResponse.Error.Message != "" {
        // OpenAI format error, so we override the default one
        openAIErrorWithStatusCode.OpenAIError = errResponse.Error
    } else {
        openAIErrorWithStatusCode.OpenAIError.Message = errResponse.ToMessage()
    }
    if openAIErrorWithStatusCode.OpenAIError.Message == "" {
        openAIErrorWithStatusCode.OpenAIError.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
    }
    return
}

func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
    fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

    if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
        switch channelType {
        case common.ChannelTypeOpenAI:
            fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
        case common.ChannelTypeAzure:
            fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/openai/deployments"))
        }
    }
    return fullRequestURL
}
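When the base URL points at Cloudflare's AI Gateway, the canonical path prefixes are trimmed so the gateway path stays valid. For example (account/gateway ids are placeholders):

u := getFullRequestURL(
    "https://gateway.ai.cloudflare.com/v1/ACCOUNT/GATEWAY/openai", // placeholder ids
    "/v1/chat/completions",
    common.ChannelTypeOpenAI,
)
// u == "https://gateway.ai.cloudflare.com/v1/ACCOUNT/GATEWAY/openai/chat/completions"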

func postConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
    // quotaDelta is remaining quota to be consumed
    err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
    if err != nil {
        common.SysError("error consuming token remain quota: " + err.Error())
    }
    err = model.CacheUpdateUserQuota(userId)
    if err != nil {
        common.SysError("error updating user quota cache: " + err.Error())
    }
    // totalQuota is total quota consumed
    if totalQuota != 0 {
        logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) // log text: "model ratio %.2f, group ratio %.2f"
        model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
        model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
        model.UpdateChannelUsedQuota(channelId, totalQuota)
    }
    if totalQuota <= 0 {
        common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
    }
}
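quotaDelta is the signed difference between the final cost and what was pre-consumed earlier in relayTextHelper, so an over-estimate is refunded. A sketch of the call-site arithmetic (variable names assumed; the actual defer block is elided from this diff):

// Sketch: reconcile once the true usage is known.
quota := promptTokens + completionTokens   // simplified; the real code applies ratios
quotaDelta := quota - preConsumedQuota     // negative values refund the surplus
postConsumeQuota(ctx, tokenId, quotaDelta, quota, userId, channelId,
    modelRatio, groupRatio, textRequest.Model, tokenName)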

func GetAPIVersion(c *gin.Context) string {
    query := c.Request.URL.Query()
    apiVersion := query.Get("api-version")
    if apiVersion == "" {
        apiVersion = c.GetString("api_version")
    }
    return apiVersion
}
@@ -1,4 +1,4 @@
package xunfei
package controller

import (
    "crypto/hmac"
@@ -12,8 +12,6 @@ import (
    "net/http"
    "net/url"
    "one-api/common"
    "one-api/relay/channel/openai"
    "one-api/relay/constant"
    "strings"
    "time"
)
@@ -21,26 +19,82 @@ import (
// https://console.xfyun.cn/services/cbm
// https://www.xfyun.cn/doc/spark/Web.html

func requestOpenAI2Xunfei(request openai.GeneralOpenAIRequest, xunfeiAppId string, domain string) *ChatRequest {
    messages := make([]Message, 0, len(request.Messages))
type XunfeiMessage struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type XunfeiChatRequest struct {
    Header struct {
        AppId string `json:"app_id"`
    } `json:"header"`
    Parameter struct {
        Chat struct {
            Domain      string  `json:"domain,omitempty"`
            Temperature float64 `json:"temperature,omitempty"`
            TopK        int     `json:"top_k,omitempty"`
            MaxTokens   int     `json:"max_tokens,omitempty"`
            Auditing    bool    `json:"auditing,omitempty"`
        } `json:"chat"`
    } `json:"parameter"`
    Payload struct {
        Message struct {
            Text []XunfeiMessage `json:"text"`
        } `json:"message"`
    } `json:"payload"`
}
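The nested anonymous structs mirror the Spark wire format, roughly {"header":{"app_id":...},"parameter":{"chat":{...}},"payload":{"message":{"text":[...]}}}. Filling one in code, with placeholder values:

var r XunfeiChatRequest
r.Header.AppId = "my-app-id" // placeholder
r.Parameter.Chat.Domain = "general"
r.Parameter.Chat.Temperature = 0.5
r.Payload.Message.Text = []XunfeiMessage{{Role: "user", Content: "hello"}}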

type XunfeiChatResponseTextItem struct {
    Content string `json:"content"`
    Role    string `json:"role"`
    Index   int    `json:"index"`
}

type XunfeiChatResponse struct {
    Header struct {
        Code    int    `json:"code"`
        Message string `json:"message"`
        Sid     string `json:"sid"`
        Status  int    `json:"status"`
    } `json:"header"`
    Payload struct {
        Choices struct {
            Status int                          `json:"status"`
            Seq    int                          `json:"seq"`
            Text   []XunfeiChatResponseTextItem `json:"text"`
        } `json:"choices"`
        Usage struct {
            //Text struct {
            //    QuestionTokens   string `json:"question_tokens"`
            //    PromptTokens     string `json:"prompt_tokens"`
            //    CompletionTokens string `json:"completion_tokens"`
            //    TotalTokens      string `json:"total_tokens"`
            //} `json:"text"`
            Text Usage `json:"text"`
        } `json:"usage"`
    } `json:"payload"`
}

func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string, domain string) *XunfeiChatRequest {
    messages := make([]XunfeiMessage, 0, len(request.Messages))
    for _, message := range request.Messages {
        if message.Role == "system" {
            messages = append(messages, Message{
            messages = append(messages, XunfeiMessage{
                Role:    "user",
                Content: message.StringContent(),
            })
            messages = append(messages, Message{
            messages = append(messages, XunfeiMessage{
                Role:    "assistant",
                Content: "Okay",
            })
        } else {
            messages = append(messages, Message{
            messages = append(messages, XunfeiMessage{
                Role:    message.Role,
                Content: message.StringContent(),
            })
        }
    }
    xunfeiRequest := ChatRequest{}
    xunfeiRequest := XunfeiChatRequest{}
    xunfeiRequest.Header.AppId = xunfeiAppId
    xunfeiRequest.Parameter.Chat.Domain = domain
    xunfeiRequest.Parameter.Chat.Temperature = request.Temperature
@@ -50,49 +104,49 @@ func requestOpenAI2Xunfei(request openai.GeneralOpenAIRequest, xunfeiAppId strin
    return &xunfeiRequest
}

func responseXunfei2OpenAI(response *ChatResponse) *openai.TextResponse {
func responseXunfei2OpenAI(response *XunfeiChatResponse) *OpenAITextResponse {
    if len(response.Payload.Choices.Text) == 0 {
        response.Payload.Choices.Text = []ChatResponseTextItem{
        response.Payload.Choices.Text = []XunfeiChatResponseTextItem{
            {
                Content: "",
            },
        }
    }
    choice := openai.TextResponseChoice{
    choice := OpenAITextResponseChoice{
        Index: 0,
        Message: openai.Message{
        Message: Message{
            Role:    "assistant",
            Content: response.Payload.Choices.Text[0].Content,
        },
        FinishReason: constant.StopFinishReason,
        FinishReason: stopFinishReason,
    }
    fullTextResponse := openai.TextResponse{
    fullTextResponse := OpenAITextResponse{
        Object:  "chat.completion",
        Created: common.GetTimestamp(),
        Choices: []openai.TextResponseChoice{choice},
        Choices: []OpenAITextResponseChoice{choice},
        Usage:   response.Payload.Usage.Text,
    }
    return &fullTextResponse
}

func streamResponseXunfei2OpenAI(xunfeiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
func streamResponseXunfei2OpenAI(xunfeiResponse *XunfeiChatResponse) *ChatCompletionsStreamResponse {
    if len(xunfeiResponse.Payload.Choices.Text) == 0 {
        xunfeiResponse.Payload.Choices.Text = []ChatResponseTextItem{
        xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{
            {
                Content: "",
            },
        }
    }
    var choice openai.ChatCompletionsStreamResponseChoice
    var choice ChatCompletionsStreamResponseChoice
    choice.Delta.Content = xunfeiResponse.Payload.Choices.Text[0].Content
    if xunfeiResponse.Payload.Choices.Status == 2 {
        choice.FinishReason = &constant.StopFinishReason
        choice.FinishReason = &stopFinishReason
    }
    response := openai.ChatCompletionsStreamResponse{
    response := ChatCompletionsStreamResponse{
        Object:  "chat.completion.chunk",
        Created: common.GetTimestamp(),
        Model:   "SparkDesk",
        Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
        Choices: []ChatCompletionsStreamResponseChoice{choice},
    }
    return &response
}
@@ -123,14 +177,14 @@ func buildXunfeiAuthUrl(hostUrl string, apiKey, apiSecret string) string {
    return callUrl
}

func StreamHandler(c *gin.Context, textRequest openai.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func xunfeiStreamHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*OpenAIErrorWithStatusCode, *Usage) {
    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
    dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
    if err != nil {
        return openai.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
        return errorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
    }
    common.SetEventStreamHeaders(c)
    var usage openai.Usage
    setEventStreamHeaders(c)
    var usage Usage
    c.Stream(func(w io.Writer) bool {
        select {
        case xunfeiResponse := <-dataChan:
@@ -153,15 +207,15 @@ func StreamHandler(c *gin.Context, textRequest openai.GeneralOpenAIRequest, appI
    return nil, &usage
}

func Handler(c *gin.Context, textRequest openai.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func xunfeiHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*OpenAIErrorWithStatusCode, *Usage) {
    domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
    dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
    if err != nil {
        return openai.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
        return errorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
    }
    var usage openai.Usage
    var usage Usage
    var content string
    var xunfeiResponse ChatResponse
    var xunfeiResponse XunfeiChatResponse
    stop := false
    for !stop {
        select {
@@ -177,7 +231,7 @@ func Handler(c *gin.Context, textRequest openai.GeneralOpenAIRequest, appId stri
        }
    }
    if len(xunfeiResponse.Payload.Choices.Text) == 0 {
        xunfeiResponse.Payload.Choices.Text = []ChatResponseTextItem{
        xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{
            {
                Content: "",
            },
@@ -188,14 +242,14 @@ func Handler(c *gin.Context, textRequest openai.GeneralOpenAIRequest, appId stri
    response := responseXunfei2OpenAI(&xunfeiResponse)
    jsonResponse, err := json.Marshal(response)
    if err != nil {
        return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
    }
    c.Writer.Header().Set("Content-Type", "application/json")
    _, _ = c.Writer.Write(jsonResponse)
    return nil, &usage
}

func xunfeiMakeRequest(textRequest openai.GeneralOpenAIRequest, domain, authUrl, appId string) (chan ChatResponse, chan bool, error) {
func xunfeiMakeRequest(textRequest GeneralOpenAIRequest, domain, authUrl, appId string) (chan XunfeiChatResponse, chan bool, error) {
    d := websocket.Dialer{
        HandshakeTimeout: 5 * time.Second,
    }
@@ -209,7 +263,7 @@ func xunfeiMakeRequest(textRequest openai.GeneralOpenAIRequest, domain, authUrl,
        return nil, nil, err
    }

    dataChan := make(chan ChatResponse)
    dataChan := make(chan XunfeiChatResponse)
    stopChan := make(chan bool)
    go func() {
        for {
@@ -218,7 +272,7 @@ func xunfeiMakeRequest(textRequest openai.GeneralOpenAIRequest, domain, authUrl,
                common.SysError("error reading stream response: " + err.Error())
                break
            }
            var response ChatResponse
            var response XunfeiChatResponse
            err = json.Unmarshal(msg, &response)
            if err != nil {
                common.SysError("error unmarshalling stream response: " + err.Error())
@@ -1,4 +1,4 @@
package zhipu
package controller

import (
    "bufio"
@@ -8,8 +8,6 @@ import (
    "io"
    "net/http"
    "one-api/common"
    "one-api/relay/channel/openai"
    "one-api/relay/constant"
    "strings"
    "sync"
    "time"
@@ -20,13 +18,53 @@ import (
// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/invoke
// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/sse-invoke

type ZhipuMessage struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type ZhipuRequest struct {
    Prompt      []ZhipuMessage `json:"prompt"`
    Temperature float64        `json:"temperature,omitempty"`
    TopP        float64        `json:"top_p,omitempty"`
    RequestId   string         `json:"request_id,omitempty"`
    Incremental bool           `json:"incremental,omitempty"`
}

type ZhipuResponseData struct {
    TaskId     string         `json:"task_id"`
    RequestId  string         `json:"request_id"`
    TaskStatus string         `json:"task_status"`
    Choices    []ZhipuMessage `json:"choices"`
    Usage      `json:"usage"`
}

type ZhipuResponse struct {
    Code    int               `json:"code"`
    Msg     string            `json:"msg"`
    Success bool              `json:"success"`
    Data    ZhipuResponseData `json:"data"`
}

type ZhipuStreamMetaResponse struct {
    RequestId  string `json:"request_id"`
    TaskId     string `json:"task_id"`
    TaskStatus string `json:"task_status"`
    Usage      `json:"usage"`
}

type zhipuTokenData struct {
    Token      string
    ExpiryTime time.Time
}

var zhipuTokens sync.Map
var expSeconds int64 = 24 * 3600

func GetToken(apikey string) string {
func getZhipuToken(apikey string) string {
    data, ok := zhipuTokens.Load(apikey)
    if ok {
        tokenData := data.(tokenData)
        tokenData := data.(zhipuTokenData)
        if time.Now().Before(tokenData.ExpiryTime) {
            return tokenData.Token
        }
@@ -62,7 +100,7 @@ func GetToken(apikey string) string {
        return ""
    }

    zhipuTokens.Store(apikey, tokenData{
    zhipuTokens.Store(apikey, zhipuTokenData{
        Token:      tokenString,
        ExpiryTime: expiryTime,
    })
@@ -70,26 +108,26 @@ func GetToken(apikey string) string {
    return tokenString
}
||||
|
||||
func ConvertRequest(request openai.GeneralOpenAIRequest) *Request {
|
||||
messages := make([]Message, 0, len(request.Messages))
|
||||
func requestOpenAI2Zhipu(request GeneralOpenAIRequest) *ZhipuRequest {
|
||||
messages := make([]ZhipuMessage, 0, len(request.Messages))
|
||||
for _, message := range request.Messages {
|
||||
if message.Role == "system" {
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, ZhipuMessage{
|
||||
Role: "system",
|
||||
Content: message.StringContent(),
|
||||
})
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, ZhipuMessage{
|
||||
Role: "user",
|
||||
Content: "Okay",
|
||||
})
|
||||
} else {
|
||||
messages = append(messages, Message{
|
||||
messages = append(messages, ZhipuMessage{
|
||||
Role: message.Role,
|
||||
Content: message.StringContent(),
|
||||
})
|
||||
}
|
||||
}
|
||||
return &Request{
|
||||
return &ZhipuRequest{
|
||||
Prompt: messages,
|
||||
Temperature: request.Temperature,
|
||||
TopP: request.TopP,
|
||||
@@ -97,18 +135,18 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *Request {
|
||||
}
|
||||
}

func responseZhipu2OpenAI(response *Response) *openai.TextResponse {
    fullTextResponse := openai.TextResponse{
func responseZhipu2OpenAI(response *ZhipuResponse) *OpenAITextResponse {
    fullTextResponse := OpenAITextResponse{
        Id:      response.Data.TaskId,
        Object:  "chat.completion",
        Created: common.GetTimestamp(),
        Choices: make([]openai.TextResponseChoice, 0, len(response.Data.Choices)),
        Choices: make([]OpenAITextResponseChoice, 0, len(response.Data.Choices)),
        Usage:   response.Data.Usage,
    }
    for i, choice := range response.Data.Choices {
        openaiChoice := openai.TextResponseChoice{
        openaiChoice := OpenAITextResponseChoice{
            Index: i,
            Message: openai.Message{
            Message: Message{
                Role:    choice.Role,
                Content: strings.Trim(choice.Content, "\""),
            },
@@ -122,34 +160,34 @@ func responseZhipu2OpenAI(response *Response) *openai.TextResponse {
    return &fullTextResponse
}

func streamResponseZhipu2OpenAI(zhipuResponse string) *openai.ChatCompletionsStreamResponse {
    var choice openai.ChatCompletionsStreamResponseChoice
func streamResponseZhipu2OpenAI(zhipuResponse string) *ChatCompletionsStreamResponse {
    var choice ChatCompletionsStreamResponseChoice
    choice.Delta.Content = zhipuResponse
    response := openai.ChatCompletionsStreamResponse{
    response := ChatCompletionsStreamResponse{
        Object:  "chat.completion.chunk",
        Created: common.GetTimestamp(),
        Model:   "chatglm",
        Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
        Choices: []ChatCompletionsStreamResponseChoice{choice},
    }
    return &response
}

func streamMetaResponseZhipu2OpenAI(zhipuResponse *StreamMetaResponse) (*openai.ChatCompletionsStreamResponse, *openai.Usage) {
    var choice openai.ChatCompletionsStreamResponseChoice
func streamMetaResponseZhipu2OpenAI(zhipuResponse *ZhipuStreamMetaResponse) (*ChatCompletionsStreamResponse, *Usage) {
    var choice ChatCompletionsStreamResponseChoice
    choice.Delta.Content = ""
    choice.FinishReason = &constant.StopFinishReason
    response := openai.ChatCompletionsStreamResponse{
    choice.FinishReason = &stopFinishReason
    response := ChatCompletionsStreamResponse{
        Id:      zhipuResponse.RequestId,
        Object:  "chat.completion.chunk",
        Created: common.GetTimestamp(),
        Model:   "chatglm",
        Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
        Choices: []ChatCompletionsStreamResponseChoice{choice},
    }
    return &response, &zhipuResponse.Usage
}
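Each upstream delta gets wrapped into an OpenAI-style `chat.completion.chunk` before being written to the client. A sketch of what one such chunk looks like on the wire, using local stand-in types with the same JSON shape as the structs in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type delta struct {
	Content string `json:"content"`
}

type streamChoice struct {
	Delta        delta   `json:"delta"`
	FinishReason *string `json:"finish_reason,omitempty"`
}

type streamChunk struct {
	Id      string         `json:"id"`
	Object  string         `json:"object"`
	Created int64          `json:"created"`
	Model   string         `json:"model"`
	Choices []streamChoice `json:"choices"`
}

func main() {
	chunk := streamChunk{
		Object:  "chat.completion.chunk",
		Created: time.Now().Unix(),
		Model:   "chatglm",
		Choices: []streamChoice{{Delta: delta{Content: "Hello"}}},
	}
	b, _ := json.Marshal(chunk)
	// One SSE event as the client receives it:
	fmt.Printf("data: %s\n\n", b)
}
```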

func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
    var usage *openai.Usage
func zhipuStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
    var usage *Usage
    scanner := bufio.NewScanner(resp.Body)
    scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
        if atEOF && len(data) == 0 {
@@ -186,7 +224,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
        }
        stopChan <- true
    }()
    common.SetEventStreamHeaders(c)
    setEventStreamHeaders(c)
    c.Stream(func(w io.Writer) bool {
        select {
        case data := <-dataChan:
@@ -199,7 +237,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
            c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
            return true
        case data := <-metaChan:
            var zhipuResponse StreamMetaResponse
            var zhipuResponse ZhipuStreamMetaResponse
            err := json.Unmarshal([]byte(data), &zhipuResponse)
            if err != nil {
                common.SysError("error unmarshalling stream response: " + err.Error())
@@ -221,28 +259,28 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
    })
    err := resp.Body.Close()
    if err != nil {
        return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
    }
    return nil, usage
}
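The stream handler installs a custom bufio.SplitFunc whose body is truncated in this hunk. For reference, a sketch of the kind of splitter typically used here, which yields one token per event block; the "\n\n" delimiter is an assumption, since the committed splitter is not fully visible:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// splitSSE emits one token per "\n\n"-delimited event block.
func splitSSE(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.Index(data, []byte("\n\n")); i >= 0 {
		return i + 2, data[:i], nil
	}
	if atEOF {
		return len(data), data, nil
	}
	return 0, nil, nil // request more data
}

func main() {
	body := "data: {\"a\":1}\n\ndata: {\"a\":2}\n\n"
	sc := bufio.NewScanner(strings.NewReader(body))
	sc.Split(splitSSE)
	for sc.Scan() {
		fmt.Printf("event: %q\n", sc.Text())
	}
}
```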

func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
    var zhipuResponse Response
func zhipuHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
    var zhipuResponse ZhipuResponse
    responseBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
    }
    err = resp.Body.Close()
    if err != nil {
        return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
    }
    err = json.Unmarshal(responseBody, &zhipuResponse)
    if err != nil {
        return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
    }
    if !zhipuResponse.Success {
        return &openai.ErrorWithStatusCode{
            Error: openai.Error{
        return &OpenAIErrorWithStatusCode{
            OpenAIError: OpenAIError{
                Message: zhipuResponse.Msg,
                Type:    "zhipu_error",
                Param:   "",
@@ -252,10 +290,9 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
        }, nil
    }
    fullTextResponse := responseZhipu2OpenAI(&zhipuResponse)
    fullTextResponse.Model = "chatglm"
    jsonResponse, err := json.Marshal(fullTextResponse)
    if err != nil {
        return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
        return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
    }
    c.Writer.Header().Set("Content-Type", "application/json")
    c.Writer.WriteHeader(resp.StatusCode)
@@ -4,53 +4,291 @@ import (
    "fmt"
    "net/http"
    "one-api/common"
    "one-api/relay/channel/openai"
    "one-api/relay/constant"
    "one-api/relay/controller"
    "one-api/relay/util"
    "strconv"
    "strings"

    "github.com/gin-gonic/gin"
)

type Message struct {
    Role    string  `json:"role"`
    Content any     `json:"content"`
    Name    *string `json:"name,omitempty"`
}

type ImageURL struct {
    Url    string `json:"url,omitempty"`
    Detail string `json:"detail,omitempty"`
}

type TextContent struct {
    Type string `json:"type,omitempty"`
    Text string `json:"text,omitempty"`
}

type ImageContent struct {
    Type     string    `json:"type,omitempty"`
    ImageURL *ImageURL `json:"image_url,omitempty"`
}

func (m Message) StringContent() string {
    content, ok := m.Content.(string)
    if ok {
        return content
    }
    contentList, ok := m.Content.([]any)
    if ok {
        var contentStr string
        for _, contentItem := range contentList {
            contentMap, ok := contentItem.(map[string]any)
            if !ok {
                continue
            }
            if contentMap["type"] == "text" {
                if subStr, ok := contentMap["text"].(string); ok {
                    contentStr += subStr
                }
            }
        }
        return contentStr
    }
    return ""
}
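StringContent normalizes the two shapes `Content` can take after JSON decoding: a plain string, or the vision-style array of typed parts, from which only the text parts are concatenated. A quick standalone demo of the same logic:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func stringContent(content any) string {
	if s, ok := content.(string); ok {
		return s
	}
	var out string
	if list, ok := content.([]any); ok {
		for _, item := range list {
			if m, ok := item.(map[string]any); ok && m["type"] == "text" {
				if s, ok := m["text"].(string); ok {
					out += s
				}
			}
		}
	}
	return out
}

func main() {
	// Vision-style content: text parts are concatenated, image parts dropped.
	raw := `[{"type":"text","text":"What is "},{"type":"image_url","image_url":{"url":"http://x/img.png"}},{"type":"text","text":"this?"}]`
	var content any
	_ = json.Unmarshal([]byte(raw), &content)
	fmt.Println(stringContent(content)) // What is this?
	fmt.Println(stringContent("plain")) // plain
}
```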

const (
    RelayModeUnknown = iota
    RelayModeChatCompletions
    RelayModeCompletions
    RelayModeEmbeddings
    RelayModeModerations
    RelayModeImagesGenerations
    RelayModeEdits
    RelayModeAudioSpeech
    RelayModeAudioTranscription
    RelayModeAudioTranslation
)

// https://platform.openai.com/docs/api-reference/chat

func Relay(c *gin.Context) {
    relayMode := constant.RelayModeUnknown
    if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") {
        relayMode = constant.RelayModeChatCompletions
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") {
        relayMode = constant.RelayModeCompletions
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
        relayMode = constant.RelayModeEmbeddings
    } else if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
        relayMode = constant.RelayModeEmbeddings
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
        relayMode = constant.RelayModeModerations
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
        relayMode = constant.RelayModeImagesGenerations
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
        relayMode = constant.RelayModeEdits
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
        relayMode = constant.RelayModeAudioSpeech
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
        relayMode = constant.RelayModeAudioTranscription
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
        relayMode = constant.RelayModeAudioTranslation
type ResponseFormat struct {
    Type string `json:"type,omitempty"`
}

type GeneralOpenAIRequest struct {
    Model            string          `json:"model,omitempty"`
    Messages         []Message       `json:"messages,omitempty"`
    Prompt           any             `json:"prompt,omitempty"`
    Stream           bool            `json:"stream,omitempty"`
    MaxTokens        int             `json:"max_tokens,omitempty"`
    Temperature      float64         `json:"temperature,omitempty"`
    TopP             float64         `json:"top_p,omitempty"`
    N                int             `json:"n,omitempty"`
    Input            any             `json:"input,omitempty"`
    Instruction      string          `json:"instruction,omitempty"`
    Size             string          `json:"size,omitempty"`
    Functions        any             `json:"functions,omitempty"`
    FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
    PresencePenalty  float64         `json:"presence_penalty,omitempty"`
    ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
    Seed             float64         `json:"seed,omitempty"`
    Tools            any             `json:"tools,omitempty"`
    ToolChoice       any             `json:"tool_choice,omitempty"`
    User             string          `json:"user,omitempty"`
}

func (r GeneralOpenAIRequest) ParseInput() []string {
    if r.Input == nil {
        return nil
    }
    var err *openai.ErrorWithStatusCode
    var input []string
    switch r.Input.(type) {
    case string:
        input = []string{r.Input.(string)}
    case []any:
        input = make([]string, 0, len(r.Input.([]any)))
        for _, item := range r.Input.([]any) {
            if str, ok := item.(string); ok {
                input = append(input, str)
            }
        }
    }
    return input
}
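Note that the stray `var err *openai.ErrorWithStatusCode` line interleaved above belongs to the removed Relay implementation, not to ParseInput itself. ParseInput normalizes the embeddings `input` field, which clients may send as a single string or as an array; a standalone usage sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func parseInput(input any) []string {
	switch v := input.(type) {
	case string:
		return []string{v}
	case []any:
		out := make([]string, 0, len(v))
		for _, item := range v {
			if s, ok := item.(string); ok {
				out = append(out, s)
			}
		}
		return out
	}
	return nil
}

func main() {
	var a, b any
	_ = json.Unmarshal([]byte(`"hello"`), &a)
	_ = json.Unmarshal([]byte(`["hello","world"]`), &b)
	fmt.Println(parseInput(a)) // [hello]
	fmt.Println(parseInput(b)) // [hello world]
}
```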

type ChatRequest struct {
    Model     string    `json:"model"`
    Messages  []Message `json:"messages"`
    MaxTokens int       `json:"max_tokens"`
}

type TextRequest struct {
    Model     string    `json:"model"`
    Messages  []Message `json:"messages"`
    Prompt    string    `json:"prompt"`
    MaxTokens int       `json:"max_tokens"`
    //Stream  bool      `json:"stream"`
}

// ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
type ImageRequest struct {
    Model          string `json:"model"`
    Prompt         string `json:"prompt" binding:"required"`
    N              int    `json:"n,omitempty"`
    Size           string `json:"size,omitempty"`
    Quality        string `json:"quality,omitempty"`
    ResponseFormat string `json:"response_format,omitempty"`
    Style          string `json:"style,omitempty"`
    User           string `json:"user,omitempty"`
}

type WhisperJSONResponse struct {
    Text string `json:"text,omitempty"`
}

type WhisperVerboseJSONResponse struct {
    Task     string    `json:"task,omitempty"`
    Language string    `json:"language,omitempty"`
    Duration float64   `json:"duration,omitempty"`
    Text     string    `json:"text,omitempty"`
    Segments []Segment `json:"segments,omitempty"`
}

type Segment struct {
    Id               int     `json:"id"`
    Seek             int     `json:"seek"`
    Start            float64 `json:"start"`
    End              float64 `json:"end"`
    Text             string  `json:"text"`
    Tokens           []int   `json:"tokens"`
    Temperature      float64 `json:"temperature"`
    AvgLogprob       float64 `json:"avg_logprob"`
    CompressionRatio float64 `json:"compression_ratio"`
    NoSpeechProb     float64 `json:"no_speech_prob"`
}

type TextToSpeechRequest struct {
    Model          string  `json:"model" binding:"required"`
    Input          string  `json:"input" binding:"required"`
    Voice          string  `json:"voice" binding:"required"`
    Speed          float64 `json:"speed"`
    ResponseFormat string  `json:"response_format"`
}

type Usage struct {
    PromptTokens     int `json:"prompt_tokens"`
    CompletionTokens int `json:"completion_tokens"`
    TotalTokens      int `json:"total_tokens"`
}

type OpenAIError struct {
    Message string `json:"message"`
    Type    string `json:"type"`
    Param   string `json:"param"`
    Code    any    `json:"code"`
}

type OpenAIErrorWithStatusCode struct {
    OpenAIError
    StatusCode int `json:"status_code"`
}

type TextResponse struct {
    Choices []OpenAITextResponseChoice `json:"choices"`
    Usage   `json:"usage"`
    Error   OpenAIError `json:"error"`
}

type OpenAITextResponseChoice struct {
    Index        int `json:"index"`
    Message      `json:"message"`
    FinishReason string `json:"finish_reason"`
}

type OpenAITextResponse struct {
    Id      string                     `json:"id"`
    Object  string                     `json:"object"`
    Created int64                      `json:"created"`
    Choices []OpenAITextResponseChoice `json:"choices"`
    Usage   `json:"usage"`
}

type OpenAIEmbeddingResponseItem struct {
    Object    string    `json:"object"`
    Index     int       `json:"index"`
    Embedding []float64 `json:"embedding"`
}

type OpenAIEmbeddingResponse struct {
    Object string                        `json:"object"`
    Data   []OpenAIEmbeddingResponseItem `json:"data"`
    Model  string                        `json:"model"`
    Usage  `json:"usage"`
}

type ImageResponse struct {
    Created int `json:"created"`
    Data    []struct {
        Url string `json:"url"`
    }
}

type ChatCompletionsStreamResponseChoice struct {
    Delta struct {
        Content string `json:"content"`
    } `json:"delta"`
    FinishReason *string `json:"finish_reason,omitempty"`
}

type ChatCompletionsStreamResponse struct {
    Id      string                                `json:"id"`
    Object  string                                `json:"object"`
    Created int64                                 `json:"created"`
    Model   string                                `json:"model"`
    Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
}

type CompletionsStreamResponse struct {
    Choices []struct {
        Text         string `json:"text"`
        FinishReason string `json:"finish_reason"`
    } `json:"choices"`
}
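Several of these types rely on Go struct embedding: OpenAIErrorWithStatusCode embeds OpenAIError, and the response types embed Usage. Because the embedded field has no json tag of its own, encoding/json flattens the promoted fields into the parent object, which is why `status_code` and the error fields end up at the same level. A small demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type OpenAIError struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Param   string `json:"param"`
	Code    any    `json:"code"`
}

type OpenAIErrorWithStatusCode struct {
	OpenAIError
	StatusCode int `json:"status_code"`
}

func main() {
	e := OpenAIErrorWithStatusCode{
		OpenAIError: OpenAIError{Message: "upstream saturated", Type: "one_api_error"},
		StatusCode:  429,
	}
	b, _ := json.Marshal(e)
	// Embedded fields are flattened into the top-level object:
	// {"message":"upstream saturated","type":"one_api_error","param":"","code":null,"status_code":429}
	fmt.Println(string(b))
}
```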

func Relay(c *gin.Context) {
    relayMode := RelayModeUnknown
    if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") {
        relayMode = RelayModeChatCompletions
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") {
        relayMode = RelayModeCompletions
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
        relayMode = RelayModeEmbeddings
    } else if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
        relayMode = RelayModeEmbeddings
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
        relayMode = RelayModeModerations
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
        relayMode = RelayModeImagesGenerations
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
        relayMode = RelayModeEdits
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
        relayMode = RelayModeAudioSpeech
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
        relayMode = RelayModeAudioTranscription
    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
        relayMode = RelayModeAudioTranslation
    }
    var err *OpenAIErrorWithStatusCode
    switch relayMode {
    case constant.RelayModeImagesGenerations:
        err = controller.RelayImageHelper(c, relayMode)
    case constant.RelayModeAudioSpeech:
    case RelayModeImagesGenerations:
        err = relayImageHelper(c, relayMode)
    case RelayModeAudioSpeech:
        fallthrough
    case constant.RelayModeAudioTranslation:
    case RelayModeAudioTranslation:
        fallthrough
    case constant.RelayModeAudioTranscription:
        err = controller.RelayAudioHelper(c, relayMode)
    case RelayModeAudioTranscription:
        err = relayAudioHelper(c, relayMode)
    default:
        err = controller.RelayTextHelper(c, relayMode)
        err = relayTextHelper(c, relayMode)
    }
    if err != nil {
        requestId := c.GetString(common.RequestIdKey)
@@ -63,17 +301,17 @@ func Relay(c *gin.Context) {
        c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
    } else {
        if err.StatusCode == http.StatusTooManyRequests {
            err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
            err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试"
        }
        err.Error.Message = common.MessageWithRequestId(err.Error.Message, requestId)
        err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId)
        c.JSON(err.StatusCode, gin.H{
            "error": err.Error,
            "error": err.OpenAIError,
        })
    }
    channelId := c.GetInt("channel_id")
    common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
    // https://platform.openai.com/docs/guides/error-codes/api-errors
    if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
    if shouldDisableChannel(&err.OpenAIError, err.StatusCode) {
        channelId := c.GetInt("channel_id")
        channelName := c.GetString("channel_name")
        disableChannel(channelId, channelName, err.Message)
@@ -82,7 +320,7 @@ func Relay(c *gin.Context) {
    }
}
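Relay resolves the mode with a long prefix chain; the Azure-style `HasSuffix(..., "embeddings")` fallback is the one case a plain prefix match cannot cover. A table-driven sketch of the same dispatch, offered only as a possible alternative shape, not as how the commit is written:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	RelayModeUnknown = iota
	RelayModeChatCompletions
	RelayModeCompletions
	RelayModeEmbeddings
	RelayModeImagesGenerations
)

var relayModesByPrefix = []struct {
	prefix string
	mode   int
}{
	{"/v1/chat/completions", RelayModeChatCompletions},
	{"/v1/completions", RelayModeCompletions},
	{"/v1/embeddings", RelayModeEmbeddings},
	{"/v1/images/generations", RelayModeImagesGenerations},
}

func relayModeFor(path string) int {
	for _, e := range relayModesByPrefix {
		if strings.HasPrefix(path, e.prefix) {
			return e.mode
		}
	}
	if strings.HasSuffix(path, "embeddings") { // Azure-style deployment paths
		return RelayModeEmbeddings
	}
	return RelayModeUnknown
}

func main() {
	fmt.Println(relayModeFor("/v1/chat/completions"))               // 1
	fmt.Println(relayModeFor("/openai/deployments/ada/embeddings")) // 3
}
```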

func RelayNotImplemented(c *gin.Context) {
    err := openai.Error{
    err := OpenAIError{
        Message: "API not implemented",
        Type:    "one_api_error",
        Param:   "",
@@ -94,7 +332,7 @@ func RelayNotImplemented(c *gin.Context) {
}

func RelayNotFound(c *gin.Context) {
    err := openai.Error{
    err := OpenAIError{
        Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
        Type:    "invalid_request_error",
        Param:   "",
@@ -7,7 +7,6 @@ import (
    "one-api/common"
    "one-api/model"
    "strconv"
    "time"

    "github.com/gin-contrib/sessions"
    "github.com/gin-gonic/gin"
@@ -249,29 +248,6 @@ func GetUser(c *gin.Context) {
    return
}

func GetUserDashboard(c *gin.Context) {
    id := c.GetInt("id")
    now := time.Now()
    startOfDay := now.Truncate(24*time.Hour).AddDate(0, 0, -6).Unix()
    endOfDay := now.Truncate(24 * time.Hour).Add(24*time.Hour - time.Second).Unix()

    dashboards, err := model.SearchLogsByDayAndModel(id, int(startOfDay), int(endOfDay))
    if err != nil {
        c.JSON(http.StatusOK, gin.H{
            "success": false,
            "message": "无法获取统计信息",
            "data":    nil,
        })
        return
    }
    c.JSON(http.StatusOK, gin.H{
        "success": true,
        "message": "",
        "data":    dashboards,
    })
    return
}

func GenerateAccessToken(c *gin.Context) {
    id := c.GetInt("id")
    user, err := model.GetUserById(id, true)
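GetUserDashboard builds a rolling seven-day window from truncated timestamps. One subtlety worth keeping in mind: time.Truncate rounds on the absolute time value, so the "midnight" it produces is UTC midnight; on a server in a non-UTC timezone the daily buckets shift accordingly. A runnable sketch of the window arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	// Start: midnight (UTC-aligned) six days ago, so the window spans seven
	// calendar days including today.
	startOfDay := now.Truncate(24 * time.Hour).AddDate(0, 0, -6).Unix()
	// End: one second before the next midnight.
	endOfDay := now.Truncate(24 * time.Hour).Add(24*time.Hour - time.Second).Unix()
	fmt.Println(startOfDay, endOfDay)
}
```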

4
go.mod
@@ -16,7 +16,7 @@ require (
    github.com/gorilla/websocket v1.5.0
    github.com/pkoukk/tiktoken-go v0.1.5
    github.com/stretchr/testify v1.8.3
    golang.org/x/crypto v0.17.0
    golang.org/x/crypto v0.14.0
    golang.org/x/image v0.14.0
    gorm.io/driver/mysql v1.4.3
    gorm.io/driver/postgres v1.5.2
@@ -58,7 +58,7 @@ require (
    github.com/ugorji/go/codec v1.2.11 // indirect
    golang.org/x/arch v0.3.0 // indirect
    golang.org/x/net v0.17.0 // indirect
    golang.org/x/sys v0.15.0 // indirect
    golang.org/x/sys v0.13.0 // indirect
    golang.org/x/text v0.14.0 // indirect
    google.golang.org/protobuf v1.30.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect

8
go.sum
@@ -150,8 +150,8 @@ golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUu
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -164,8 +164,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

249
i18n/en.json
@@ -86,7 +86,6 @@
  "该令牌已过期": "The token has expired",
  "该令牌额度已用尽": "The token quota has been used up",
  "无效的令牌": "Invalid token",
  "令牌验证失败": "Token verification failed",
  "id 或 userId 为空!": "id or userId is empty!",
  "quota 不能为负数!": "quota cannot be negative!",
  "令牌额度不足": "Insufficient token quota",
@@ -459,7 +458,6 @@
  "使用明细(总消耗额度:{renderQuota(stat.quota)})": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
  "用户名称": "User Name",
  "令牌名称": "Token Name",
  "默认令牌": "Default Token",
  "留空则查询全部用户": "Leave blank to query all users",
  "留空则查询全部令牌": "Leave blank to query all tokens",
  "模型名称": "Model Name",
@@ -528,250 +526,5 @@
  "模型版本": "Model version",
  "请输入星火大模型版本,注意是接口地址中的版本号,例如:v2.1": "Please enter the version of the Starfire model, note that it is the version number in the interface address, for example: v2.1",
  "点击查看": "click to view",
  "请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!": "Please make sure that the gpt-35-turbo model has been created on Azure, and the apiVersion has been filled in correctly!",
  "处理中...": "Processing...",
  "绑定成功!": "Binding successful!",
  "登录成功!": "Login successful!",
  "操作失败,重定向至登录界面中...": "Operation failed, redirecting to login screen...",
  "出现错误,第 ${count} 次重试中...": "An error occurred, retrying ${count}...",
  "首页": "Home",
  "渠道": "Channel",
  "令牌": "API Keys",
  "兑换": "Redeem",
  "充值": "Recharge",
  "用户": "Users",
  "日志": "Logs",
  "设置": "Settings",
  "关于": "About",
  "聊天": "Chat",
  "注销成功!": "Logout successful!",
  "注销": "Log out",
  "登录": "Log in",
  "注册": "Sign up",
  "加载{name}中...": "Loading {name}...",
  "未登录或登录已过期,请重新登录!": "Not logged in or login has expired, please log in again!",
  "请立刻修改默认密码!": "Please change the default password immediately!",
  "欢迎回来": "Welcome back",
  "没有账户?": "No account?",
  "立刻注册": "Sign up now",
  "用户名": "Username",
  "密码": "Password",
  "正在登录……": "Logging in...",
  "忘记密码": "Forgot password",
  "其他方式": "Other methods",
  "微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)": "Scan the QR code with WeChat, follow the official account and enter 'verification code' to get the verification code (valid within three minutes)",
  "验证码": "Verification code",
  "全部用户": "All users",
  "当前用户": "Current user",
  "全部": "All",
  "消费": "Consumption",
  "管理": "Management",
  "系统": "System",
  "未知": "Unknown",
  "其他模型": "Other models",
  "复制成功": "Copy successful",
  "使用明细": "Usages",
  "刷新": "Refresh",
  "收起面板": "Collapse panel",
  "展开面板": "Expand panel",
  "显示查询选项": "Show search options",
  "隐藏查询选项": "Hide search options",
  "用户名称": "User name",
  "可选值": "Optional values",
  "渠道 ID": "Channel ID",
  "令牌名称": "Key name",
  "模型名称": "Model name",
  "起始时间": "Start time",
  "结束时间": "End time",
  "查询": "Query",
  "隐藏条形图": "Hide bar chart",
  "显示条形图": "Show bar chart",
  "折线条形图只展示最新50条数据": "Line and bar charts only show the latest 50 pieces of data",
  "总消耗": "Total consumption",
  "总共调用了 {payload[0].value} 次": "A total of {payload[0].value} calls were made",
  "{model.name}: {model.value} 次": "{model.name}: {model.value} times",
  "总共调用了 {payload[0].value} 次 {payload[0].name}": "A total of {payload[0].value} {payload[0].name} calls were made",
  "总消耗额度": "Total consumption limit",
  "暂无数据": "No data available",
  "更多数据统计图形即将到来,敬请期待!": "More data statistics graphics are coming soon, stay tuned!",
  "复制用户名": "Copy username",
  "{`共 ${counts} 条数据`}": "{`A total of ${counts} pieces of data`}",
  "共 0 条数据": "A total of 0 pieces of data",
  "选择明细分类": "Select detail category",
  "模型倍率": "model rate",
  "分组倍率": "group rate",
  "新密码已复制到剪贴板:": "New password has been copied to the clipboard:",
  "密码重置确认": "Password reset confirmation",
  "邮箱地址": "Email address",
  "新密码": "New password",
  "密码已复制到剪贴板:": "Password has been copied to the clipboard:",
  "密码重置完成": "Password reset complete",
  "提交": "Submit",
  "返回登录": "Return to login",
  "请稍后重试,浏览器环境检查未通过": "Please try again later, browser environment check failed",
  "重置邮件发送成功,请检查邮箱!": "Reset email sent successfully, please check your email!",
  "密码重置": "Password reset",
  "重试": "Retry",
  "组": "Group",
  "令牌已重置并已复制到剪贴板": "Token has been reset and copied to the clipboard",
  "邀请链接已复制到剪切板": "Invitation link has been copied to the clipboard",
  "系统令牌已复制到剪切板": "System token has been copied to the clipboard",
  "请输入你的账户名以确认删除!": "Please enter your account name to confirm deletion!",
  "账户已删除!": "Account has been deleted!",
  "微信账户绑定成功!": "WeChat account binding successful!",
  "请稍后几秒重试,Turnstile 正在检查用户环境!": "Please try again in a few seconds, Turnstile is checking the user environment!",
  "验证码发送成功,请检查邮箱!": "Verification code sent successfully, please check your email!",
  "邮箱账户绑定成功!": "Email account binding successful!",
  "个人信息": "Personal information",
  "编辑个人信息": "Edit personal information",
  "生成系统访问令牌": "Generate system access token",
  "复制邀请链接": "Copy invitation link",
  "删除个人帐户": "Delete personal account",
  "普通用户": "Regular user",
  "管理员": "Administrator",
  "超级管理员": "Super administrator",
  "显示名称": "Display name",
  "GitHub 账号": "GitHub account",
  "微信账号": "WeChat account",
  "修改个人信息只允许在电脑端进行。生成的令牌用于系统管理,而非用于请求 OpenAI 相关的服务,请知悉。": "Modifying personal information is only allowed on a computer. The generated token is for system management, not for requesting OpenAI related services. Please be aware.",
  "可用模型": "Available models",
  "账号绑定": "Account binding",
  "绑定微信": "Bind WeChat",
  "绑定 GitHub": "Bind GitHub",
  "绑定邮箱": "Bind Email",
  "绑定": "Bind",
  "绑定邮箱地址": "Bind email address",
  "输入邮箱地址": "Enter email address",
  "重新发送": "Resend",
  "获取验证码": "Get verification code",
  "确认绑定": "Confirm binding",
  "取消": "Cancel",
  "危险操作": "Dangerous operation",
  "您正在删除自己的帐户,将清空所有数据且不可恢复": "You are deleting your own account, all data will be cleared and cannot be recovered",
  "输入你的账户名": "Enter your account name",
  "以确认删除": "To confirm deletion",
  "确认删除": "Confirm deletion",
  "未使用": "Not used",
  "已禁用": "Disabled",
  "已使用": "Used",
  "未知状态": "Unknown status",
  "操作成功完成!": "Operation successfully completed!",
  "搜索兑换码的 ID 和名称 ...": "Search for the ID and name of the redemption code ...",
  "名称": "Name",
  "状态": "Status",
  "额度": "Quota",
  "创建时间": "Creation time",
  "兑换时间": "Redemption time",
  "操作": "Operation",
  "尚未兑换": "Not yet redeemed",
  "已复制到剪贴板!": "Copied to clipboard!",
  "无法复制到剪贴板,请手动复制,已将兑换码填入搜索框。": "Unable to copy to clipboard, please copy manually. The redemption code has been filled in the search box.",
  "复制": "Copy",
  "删除": "Delete",
  "禁用": "Disable",
  "启用": "Enable",
  "编辑": "Edit",
  "添加新的兑换码": "Add new redemption code",
  "密码长度不得小于 8 位!": "Password length must not be less than 8 characters!",
  "两次输入的密码不一致": "The two passwords entered do not match",
  "注册成功!": "Registration successful!",
  "请填写注册邮箱!": "Please fill in the registration email!",
  "请在${verificationTimeout}秒后再试": "Please try again after ${verificationTimeout} seconds",
  "验证码发送成功,请检查你的邮箱!": "Verification code sent successfully, please check your email!",
  "已有账户?": "Already have an account?",
  "请输入用户名(最长 12 位)": "Please enter a username (up to 12 characters)",
  "请输入密码(最短 8 位,最长 20 位)": "Please enter a password (minimum 8 characters, maximum 20 characters)",
  "请再次输入密码": "Please enter the password again",
  "请输入邮箱地址": "Please enter an email address",
  "秒后可重发": "Can be resent after seconds",
  "请输入邮箱验证码": "Please enter the email verification code",
  "已过期": "Expired",
  "已启用": "Enabled",
  "已耗尽": "Exhausted",
  "无": "None",
  "令牌密钥": "API Key",
  "令牌状态": "Key status",
  "已用额度": "Used quota",
  "剩余额度": "Remaining quota",
  "过期时间": "Expiration time",
  "你确定要删除这个令牌吗?": "Are you sure you want to delete this key?",
  "无法复制到剪贴板,请手动复制,已将令牌密钥填入搜索框": "Unable to copy to clipboard, please copy manually. The key key has been filled in the search box.",
  "无限制": "Unlimited",
  "永不过期": "Never expires",
  "使用 API 访问令牌进行服务鉴权和计费。": "Use API Key for service authentication and billing.",
  "API 访问令牌关系到您的个人利益,请妥善留存,不要与其他人共享,也不要保存在客户端代码中。": "API Key is related to your personal interests. Please keep it properly. Do not share it with others or save it in client code.",
  "创建令牌": "Create Key",
  "什么都还没有,快去创建一个令牌开始使用吧!": "Nothing yet, go create a key to start using!",
  "你确定要删除该令牌吗": "Are you sure you want to delete this key",
  "导出令牌信息": "Export key information",
  "错误:未登录或登录已过期,请重新登录!": "Error: Not logged in or login has expired, please log in again!",
  "错误:请求次数过多,请稍后再试!": "Error: Too many requests, please try again later!",
  "错误:服务器内部错误,请联系管理员!": "Error: Server internal error, please contact the online customer service!",
  "本站仅作演示之用,无服务端!": "This site is for demonstration purposes only, no server!",
  "错误:": "Error:",
  "加载首页内容失败...": "Failed to load homepage content...",
  "系统状况": "System status",
  "系统信息": "System information",
  "系统信息总览": "System information overview",
  "名称:": "Name:",
  "版本:": "Version:",
  "源码:": "Source code:",
  "启动时间:": "Startup time:",
  "系统配置": "System configuration",
  "系统配置总览": "System configuration overview",
  "邮箱验证:": "Email verification:",
  "未启用": "Not enabled",
  "Turnstile 用户校验:": "Turnstile user verification:",
  "页面不存在": "Page does not exist",
  "请检查你的浏览器地址是否正确": "Please check if your browser address is correct",
  "个人设置": "Personal settings",
  "运营设置": "Operations settings",
  "系统设置": "System settings",
  "其他设置": "Other settings",
  "默认令牌": "Default key",
  "过期时间必须在当前时间之后!": "Expiration time must be after the current time!",
  "额度必须大于等于 0!": "Quota must be greater than or equal to 0!",
  "过期时间格式错误!": "Expiration time format error!",
  "创建令牌数量必须大于等于 1!": "The number of keys to create must be greater than or equal to 1!",
  "令牌修改成功": "API Key modification successful",
  "令牌创建成功": "API Key creation successful",
  "更新令牌信息": "Update key information",
  "创建新的令牌": "Create a new key",
  "请输入名称": "Please enter a name",
  "请输入过期时间,格式为 yyyy-MM-dd HH:mm:ss,-1 表示无限制": "Please enter the expiration time, the format is yyyy-MM-dd HH:mm:ss, -1 means unlimited",
  "无限额度": "Unlimited quota",
  "注意:启用无限额度后,已用额度将不再进行计算。": "Note: After enabling unlimited quota, the used quota will no longer be calculated.",
  "等于": "Equals",
  "请输入额度(单位:token)": "Please enter the quota (unit: token)",
  "创建令牌数量": "Create key quantity",
  "请输入令牌数量": "Please enter the number of keys",
  "注意:令牌的额度仅用于限制令牌本身的最大额度使用量,实际的使用受到账户的剩余额度限制。": "Note: The quota of the key is only used to limit the maximum quota usage of the key itself, and the actual usage is subject to the remaining quota of the account.",
  "我的令牌": "My keys",
  "请输入额度兑换码!": "Please enter the redeem code!",
  "充值成功!": "Recharge successful!",
  "请求失败": "Request failed",
  "超级管理员未设置充值链接!": "The super administrator did not set a recharge link!",
  "充值额度": "Recharge quota",
  "兑换中...": "Redeeming...",
  "请点击充值以获取额度兑换码。": "Please click recharge to get the quota redemption code.",
  "用户信息更新成功!": "User information updated successfully!",
  "更新用户信息": "Update user information",
  "请输入新的用户名": "Please enter a new username",
  "请输入新的密码,最短 8 位": "Please enter a new password, at least 8 characters",
  "请输入新的显示名称": "Please enter a new display name",
  "分组": "Group",
  "请选择分组": "Please select a group",
  "请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit the group rate on the system settings page to add a new group:",
  "请输入新的剩余额度": "Please enter a new remaining quota",
  "已绑定的 GitHub 账户": "Bound GitHub account",
  "此项只读,需要用户通过个人设置页面的相关绑定按钮进行绑定,不可直接修改": "This item is read-only, users need to bind through the relevant binding button on the personal settings page, cannot be directly modified",
  "已绑定的微信账户": "Bound WeChat account",
  "已绑定的邮箱账户": "Bound email account",
  "新版本可用:${data.version},请使用快捷键 Shift + F5 刷新页面": "New version available: ${data.version}, please refresh the page using the shortcut key Shift + F5",
  "无法正常连接至服务器!": "Unable to connect to the server normally!",
  "提示:": "Input:",
  "补全:": "Output:",
  "搜索令牌名称": "Search key name",
  "测试所有渠道": "Test all channels",
  "更新已启用渠道余额": "Update the balance of enabled channels"
  "请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!": "Please make sure that the gpt-35-turbo model has been created on Azure, and the apiVersion has been filled in correctly!"
}

13
main.go
@@ -10,18 +10,20 @@ import (
    "one-api/controller"
    "one-api/middleware"
    "one-api/model"
    "one-api/relay/channel/openai"
    "one-api/router"
    "os"
    "strconv"
)

//go:embed web/build/*
//go:embed web/build
var buildFS embed.FS

//go:embed web/build/index.html
var indexPage []byte

func main() {
    common.SetupLogger()
    common.SysLog(fmt.Sprintf("One API %s started", common.Version))
    common.SysLog("One API " + common.Version + " started")
    if os.Getenv("GIN_MODE") != "debug" {
        gin.SetMode(gin.ReleaseMode)
    }
@@ -48,7 +50,6 @@ func main() {

    // Initialize options
    model.InitOptionMap()
    common.SysLog(fmt.Sprintf("using theme %s", common.Theme))
    if common.RedisEnabled {
        // for compatibility with old versions
        common.MemoryCacheEnabled = true
@@ -81,7 +82,7 @@ func main() {
        common.SysLog("batch update enabled with interval " + strconv.Itoa(common.BatchUpdateInterval) + "s")
        model.InitBatchUpdater()
    }
    openai.InitTokenEncoders()
    controller.InitTokenEncoders()

    // Initialize HTTP server
    server := gin.New()
@@ -94,7 +95,7 @@ func main() {
    store := cookie.NewStore([]byte(common.SessionSecret))
    server.Use(sessions.Sessions("session", store))

    router.SetRouter(server, buildFS)
    router.SetRouter(server, buildFS, indexPage)
    var port = os.Getenv("PORT")
    if port == "" {
        port = strconv.Itoa(*common.Port)
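The embed change above is subtler than it looks: embedding the directory name (`web/build`) skips files whose names begin with "." or "_", while the glob form (`web/build/*`) also pulls those in at the top level. The separate `indexPage` embed keeps index.html available as a byte slice for SPA fallback routes without walking the FS per request. A sketch (it compiles only next to an actual web/build directory):

```go
package main

import (
	"embed"
	"fmt"
	"io/fs"
)

// Embedding the directory name excludes dot- and underscore-prefixed files;
// the glob pattern web/build/* would include them at that level.
//
//go:embed web/build
var buildFS embed.FS

// index.html embedded separately for direct use as a fallback page body.
//
//go:embed web/build/index.html
var indexPage []byte

func main() {
	_ = fs.WalkDir(buildFS, "web/build", func(p string, d fs.DirEntry, err error) error {
		fmt.Println(p)
		return err
	})
	fmt.Println(len(indexPage), "bytes of index.html")
}
```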
@@ -5,7 +5,6 @@ import (
    "github.com/gin-gonic/gin"
    "net/http"
    "one-api/common"
    "runtime/debug"
)

func RelayPanicRecover() gin.HandlerFunc {
@@ -13,7 +12,6 @@ func RelayPanicRecover() gin.HandlerFunc {
        defer func() {
            if err := recover(); err != nil {
                common.SysError(fmt.Sprintf("panic detected: %v", err))
                common.SysError(fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
                c.JSON(http.StatusInternalServerError, gin.H{
                    "error": gin.H{
                        "message": fmt.Sprintf("Panic detected, error: %v. Please submit a issue here: https://github.com/songquanpeng/one-api", err),

44
model/log.go
@@ -3,15 +3,14 @@ package model
import (
    "context"
    "fmt"
    "one-api/common"

    "gorm.io/gorm"
    "one-api/common"
)

type Log struct {
    Id        int    `json:"id"`
    Id        int    `json:"id;index:idx_created_at_id,priority:1"`
    UserId    int    `json:"user_id" gorm:"index"`
    CreatedAt int64  `json:"created_at" gorm:"bigint;index:idx_created_at_type"`
    CreatedAt int64  `json:"created_at" gorm:"bigint;index:idx_created_at_id,priority:2;index:idx_created_at_type"`
    Type      int    `json:"type" gorm:"index:idx_created_at_type"`
    Content   string `json:"content"`
    Username  string `json:"username" gorm:"index:index_username_model_name,priority:2;default:''"`
@@ -183,40 +182,3 @@ func DeleteOldLog(targetTimestamp int64) (int64, error) {
    result := DB.Where("created_at < ?", targetTimestamp).Delete(&Log{})
    return result.RowsAffected, result.Error
}

type LogStatistic struct {
    Day              string `gorm:"column:day"`
    ModelName        string `gorm:"column:model_name"`
    RequestCount     int    `gorm:"column:request_count"`
    Quota            int    `gorm:"column:quota"`
    PromptTokens     int    `gorm:"column:prompt_tokens"`
    CompletionTokens int    `gorm:"column:completion_tokens"`
}

func SearchLogsByDayAndModel(userId, start, end int) (LogStatistics []*LogStatistic, err error) {
    groupSelect := "DATE_FORMAT(FROM_UNIXTIME(created_at), '%Y-%m-%d') as day"

    if common.UsingPostgreSQL {
        groupSelect = "TO_CHAR(date_trunc('day', to_timestamp(created_at)), 'YYYY-MM-DD') as day"
    }

    if common.UsingSQLite {
        groupSelect = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day"
    }

    err = DB.Raw(`
        SELECT `+groupSelect+`,
        model_name, count(1) as request_count,
        sum(quota) as quota,
        sum(prompt_tokens) as prompt_tokens,
        sum(completion_tokens) as completion_tokens
        FROM logs
        WHERE type=2
        AND user_id= ?
        AND created_at BETWEEN ? AND ?
        GROUP BY day, model_name
        ORDER BY day, model_name
    `, userId, start, end).Scan(&LogStatistics).Error

    return LogStatistics, err
}
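One thing a reviewer may want to flag in the Log struct change: the composite index clause on Id sits inside the json struct tag rather than a gorm tag. If that reading is right, GORM never sees the index, and the marshaled JSON key for the field silently changes, since encoding/json treats everything before the first comma as the field name. A sketch of the difference, with the gorm-tag form being what was presumably intended:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type logAsCommitted struct {
	Id int `json:"id;index:idx_created_at_id,priority:1"`
}

type logIntended struct {
	Id int `json:"id" gorm:"index:idx_created_at_id,priority:1"`
}

func main() {
	a, _ := json.Marshal(logAsCommitted{Id: 7})
	b, _ := json.Marshal(logIntended{Id: 7})
	fmt.Println(string(a)) // {"id;index:idx_created_at_id":7} -- key is no longer "id"
	fmt.Println(string(b)) // {"id":7}
}
```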
@@ -16,7 +16,7 @@ var DB *gorm.DB

func createRootAccountIfNeed() error {
    var user User
    //if user.Status != util.UserStatusEnabled {
    //if user.Status != common.UserStatusEnabled {
    if err := DB.First(&user).Error; err != nil {
        common.SysLog("no user exists, create a root user for you: username is root, password is 123456")
        hashedPassword, err := common.Password2Hash("123456")

@@ -72,7 +72,6 @@ func InitOptionMap() {
    common.OptionMap["ChatLink"] = common.ChatLink
    common.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(common.QuotaPerUnit, 'f', -1, 64)
    common.OptionMap["RetryTimes"] = strconv.Itoa(common.RetryTimes)
    common.OptionMap["Theme"] = common.Theme
    common.OptionMapRWMutex.Unlock()
    loadOptionsFromDatabase()
}
@@ -221,8 +220,6 @@ func updateOptionMap(key string, value string) (err error) {
        common.ChannelDisableThreshold, _ = strconv.ParseFloat(value, 64)
    case "QuotaPerUnit":
        common.QuotaPerUnit, _ = strconv.ParseFloat(value, 64)
    case "Theme":
        common.Theme = value
    }
    return err
}

@@ -38,43 +38,39 @@ func ValidateUserToken(key string) (token *Token, err error) {
        return nil, errors.New("未提供令牌")
    }
    token, err = CacheGetTokenByKey(key)
    if err != nil {
        common.SysError("CacheGetTokenByKey failed: " + err.Error())
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, errors.New("无效的令牌")
    if err == nil {
        if token.Status == common.TokenStatusExhausted {
            return nil, errors.New("该令牌额度已用尽")
        } else if token.Status == common.TokenStatusExpired {
            return nil, errors.New("该令牌已过期")
        }
        return nil, errors.New("令牌验证失败")
    }
    if token.Status == common.TokenStatusExhausted {
        return nil, errors.New("该令牌额度已用尽")
    } else if token.Status == common.TokenStatusExpired {
        return nil, errors.New("该令牌已过期")
    }
    if token.Status != common.TokenStatusEnabled {
        return nil, errors.New("该令牌状态不可用")
    }
    if token.ExpiredTime != -1 && token.ExpiredTime < common.GetTimestamp() {
        if !common.RedisEnabled {
            token.Status = common.TokenStatusExpired
            err := token.SelectUpdate()
            if err != nil {
                common.SysError("failed to update token status" + err.Error())
        if token.Status != common.TokenStatusEnabled {
            return nil, errors.New("该令牌状态不可用")
        }
        if token.ExpiredTime != -1 && token.ExpiredTime < common.GetTimestamp() {
            if !common.RedisEnabled {
                token.Status = common.TokenStatusExpired
                err := token.SelectUpdate()
                if err != nil {
                    common.SysError("failed to update token status" + err.Error())
                }
            }
            return nil, errors.New("该令牌已过期")
        }
        return nil, errors.New("该令牌已过期")
    }
    if !token.UnlimitedQuota && token.RemainQuota <= 0 {
        if !common.RedisEnabled {
            // in this case, we can make sure the token is exhausted
            token.Status = common.TokenStatusExhausted
            err := token.SelectUpdate()
            if err != nil {
                common.SysError("failed to update token status" + err.Error())
        if !token.UnlimitedQuota && token.RemainQuota <= 0 {
            if !common.RedisEnabled {
                // in this case, we can make sure the token is exhausted
                token.Status = common.TokenStatusExhausted
                err := token.SelectUpdate()
                if err != nil {
                    common.SysError("failed to update token status" + err.Error())
                }
            }
            return nil, errors.New("该令牌额度已用尽")
        }
        return nil, errors.New("该令牌额度已用尽")
    }
        return token, nil
    }
    return token, nil
    return nil, errors.New("无效的令牌")
}

func GetTokenByIds(id int, userId int) (*Token, error) {

@@ -6,16 +6,17 @@ import (
    "gorm.io/gorm"
    "one-api/common"
    "strings"
    "strconv"
)

// User if you add sensitive fields, don't forget to clean them in setupLogin function.
// Otherwise, the sensitive information will be saved on local storage in plain text!
type User struct {
    Id          int    `json:"id"`
    Username    string `json:"username" gorm:"unique;index" validate:"max=12"`
    Password    string `json:"password" gorm:"not null;" validate:"min=8,max=20"`
    Username    string `json:"username" gorm:"unique;index" validate:"max=30"`
    Password    string `json:"password" gorm:"not null;" validate:"min=8,max=30"`
    DisplayName string `json:"display_name" gorm:"index" validate:"max=20"`
    Role        int    `json:"role" gorm:"type:int;default:1"` // admin, util
    Role        int    `json:"role" gorm:"type:int;default:1"` // admin, common
    Status      int    `json:"status" gorm:"type:int;default:1"` // enabled, disabled
    Email       string `json:"email" gorm:"index" validate:"max=50"`
    GitHubId    string `json:"github_id" gorm:"column:github_id;index"`
@@ -42,11 +43,12 @@ func GetAllUsers(startIdx int, num int) (users []*User, err error) {
}

func SearchUsers(keyword string) (users []*User, err error) {
    if !common.UsingPostgreSQL {
        err = DB.Omit("password").Where("id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?", keyword, keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
    if uid, ok := strconv.Atoi(keyword); ok == nil {
        err = DB.Omit("password").Where("id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?", uid, keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
    } else {
        err = DB.Omit("password").Where("username LIKE ? or email LIKE ? or display_name LIKE ?", keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
    }

    return users, err
}

@@ -141,15 +143,7 @@ func (user *User) ValidateAndFill() (err error) {
    if user.Username == "" || password == "" {
        return errors.New("用户名或密码为空")
    }
    err = DB.Where("username = ?", user.Username).First(user).Error
    if err != nil {
        // we must make sure check username firstly
        // consider this case: a malicious user set his username as other's email
        err := DB.Where("email = ?", user.Username).First(user).Error
        if err != nil {
            return errors.New("用户名或密码错误,或用户已被封禁")
        }
    }
    DB.Where(User{Username: user.Username}).First(user)
    okay := common.ValidatePasswordAndHash(password, user.Password)
    if !okay || user.Status != common.UserStatusEnabled {
        return errors.New("用户名或密码错误,或用户已被封禁")
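The SearchUsers change replaces the UsingPostgreSQL branch with a strconv.Atoi gate: the `id = ?` clause is only added when the keyword actually parses as an integer, so PostgreSQL never compares an integer column against arbitrary text. Note the committed variable is named `ok` even though Atoi returns an error, so `ok == nil` is an error check in disguise. A sketch of the query-building logic in isolation:

```go
package main

import (
	"fmt"
	"strconv"
)

// buildUserSearch only includes the id clause when the keyword is numeric,
// mirroring the change above.
func buildUserSearch(keyword string) (where string, args []any) {
	if uid, err := strconv.Atoi(keyword); err == nil {
		return "id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?",
			[]any{uid, keyword + "%", keyword + "%", keyword + "%"}
	}
	return "username LIKE ? or email LIKE ? or display_name LIKE ?",
		[]any{keyword + "%", keyword + "%", keyword + "%"}
}

func main() {
	w, a := buildUserSearch("42")
	fmt.Println(w, a)
	w, a = buildUserSearch("alice")
	fmt.Println(w, a)
}
```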

@@ -1,9 +1,3 @@
[//]: # (Please link the related issue in the following format)
[//]: # (Please confirm that the submitted feature works before opening the PR and attach screenshots; this helps the maintainers review & merge it, thanks)
[//]: # (The maintainers generally only handle PRs on weekends, so please understand if replies are not prompt)
[//]: # (Developer chat group: 910657413)
[//]: # (Please delete the comments above before submitting the PR)

close #issue_number

I have confirmed that this PR passed self-testing; the relevant screenshots are as follows:
@@ -1,32 +0,0 @@
package aiproxy

type LibraryRequest struct {
    Model     string `json:"model"`
    Query     string `json:"query"`
    LibraryId string `json:"libraryId"`
    Stream    bool   `json:"stream"`
}

type LibraryError struct {
    ErrCode int    `json:"errCode"`
    Message string `json:"message"`
}

type LibraryDocument struct {
    Title string `json:"title"`
    URL   string `json:"url"`
}

type LibraryResponse struct {
    Success   bool              `json:"success"`
    Answer    string            `json:"answer"`
    Documents []LibraryDocument `json:"documents"`
    LibraryError
}

type LibraryStreamResponse struct {
    Content   string            `json:"content"`
    Finish    bool              `json:"finish"`
    Model     string            `json:"model"`
    Documents []LibraryDocument `json:"documents"`
}
@@ -1,71 +0,0 @@
package ali

type Message struct {
    Content string `json:"content"`
    Role    string `json:"role"`
}

type Input struct {
    //Prompt   string    `json:"prompt"`
    Messages []Message `json:"messages"`
}

type Parameters struct {
    TopP              float64 `json:"top_p,omitempty"`
    TopK              int     `json:"top_k,omitempty"`
    Seed              uint64  `json:"seed,omitempty"`
    EnableSearch      bool    `json:"enable_search,omitempty"`
    IncrementalOutput bool    `json:"incremental_output,omitempty"`
}

type ChatRequest struct {
    Model      string     `json:"model"`
    Input      Input      `json:"input"`
    Parameters Parameters `json:"parameters,omitempty"`
}

type EmbeddingRequest struct {
    Model string `json:"model"`
    Input struct {
        Texts []string `json:"texts"`
    } `json:"input"`
    Parameters *struct {
        TextType string `json:"text_type,omitempty"`
    } `json:"parameters,omitempty"`
}

type Embedding struct {
    Embedding []float64 `json:"embedding"`
    TextIndex int       `json:"text_index"`
}

type EmbeddingResponse struct {
    Output struct {
        Embeddings []Embedding `json:"embeddings"`
    } `json:"output"`
    Usage Usage `json:"usage"`
    Error
}

type Error struct {
    Code      string `json:"code"`
    Message   string `json:"message"`
    RequestId string `json:"request_id"`
}

type Usage struct {
    InputTokens  int `json:"input_tokens"`
    OutputTokens int `json:"output_tokens"`
    TotalTokens  int `json:"total_tokens"`
}

type Output struct {
    Text         string `json:"text"`
    FinishReason string `json:"finish_reason"`
}

type ChatResponse struct {
    Output Output `json:"output"`
    Usage  Usage  `json:"usage"`
    Error
}
@@ -1,29 +0,0 @@
package anthropic

type Metadata struct {
    UserId string `json:"user_id"`
}

type Request struct {
    Model             string   `json:"model"`
    Prompt            string   `json:"prompt"`
    MaxTokensToSample int      `json:"max_tokens_to_sample"`
    StopSequences     []string `json:"stop_sequences,omitempty"`
    Temperature       float64  `json:"temperature,omitempty"`
    TopP              float64  `json:"top_p,omitempty"`
    TopK              int      `json:"top_k,omitempty"`
    //Metadata        `json:"metadata,omitempty"`
    Stream            bool     `json:"stream,omitempty"`
}

type Error struct {
    Type    string `json:"type"`
    Message string `json:"message"`
}

type Response struct {
    Completion string `json:"completion"`
    StopReason string `json:"stop_reason"`
    Model      string `json:"model"`
    Error      Error  `json:"error"`
}
@@ -1,50 +0,0 @@
package baidu

import (
    "one-api/relay/channel/openai"
    "time"
)

type ChatResponse struct {
    Id               string       `json:"id"`
    Object           string       `json:"object"`
    Created          int64        `json:"created"`
    Result           string       `json:"result"`
    IsTruncated      bool         `json:"is_truncated"`
    NeedClearHistory bool         `json:"need_clear_history"`
    Usage            openai.Usage `json:"usage"`
    BaiduError
}

type ChatStreamResponse struct {
    ChatResponse
    SentenceId int  `json:"sentence_id"`
    IsEnd      bool `json:"is_end"`
}

type EmbeddingRequest struct {
    Input []string `json:"input"`
}

type EmbeddingData struct {
    Object    string    `json:"object"`
    Embedding []float64 `json:"embedding"`
    Index     int       `json:"index"`
}

type EmbeddingResponse struct {
    Id      string          `json:"id"`
    Object  string          `json:"object"`
    Created int64           `json:"created"`
    Data    []EmbeddingData `json:"data"`
    Usage   openai.Usage    `json:"usage"`
    BaiduError
}

type AccessToken struct {
    AccessToken      string    `json:"access_token"`
    Error            string    `json:"error,omitempty"`
    ErrorDescription string    `json:"error_description,omitempty"`
    ExpiresIn        int64     `json:"expires_in,omitempty"`
    ExpiresAt        time.Time `json:"-"`
}
@@ -1,80 +0,0 @@
package google

import (
    "one-api/relay/channel/openai"
)

type GeminiChatRequest struct {
    Contents         []GeminiChatContent        `json:"contents"`
    SafetySettings   []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
    GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
    Tools            []GeminiChatTools          `json:"tools,omitempty"`
}

type GeminiInlineData struct {
    MimeType string `json:"mimeType"`
    Data     string `json:"data"`
}

type GeminiPart struct {
    Text       string            `json:"text,omitempty"`
    InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}

type GeminiChatContent struct {
    Role  string       `json:"role,omitempty"`
    Parts []GeminiPart `json:"parts"`
}

type GeminiChatSafetySettings struct {
    Category  string `json:"category"`
    Threshold string `json:"threshold"`
}

type GeminiChatTools struct {
    FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}

type GeminiChatGenerationConfig struct {
    Temperature     float64  `json:"temperature,omitempty"`
    TopP            float64  `json:"topP,omitempty"`
    TopK            float64  `json:"topK,omitempty"`
    MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
    CandidateCount  int      `json:"candidateCount,omitempty"`
    StopSequences   []string `json:"stopSequences,omitempty"`
}

type PaLMChatMessage struct {
    Author  string `json:"author"`
    Content string `json:"content"`
}

type PaLMFilter struct {
    Reason  string `json:"reason"`
    Message string `json:"message"`
}

type PaLMPrompt struct {
    Messages []PaLMChatMessage `json:"messages"`
}

type PaLMChatRequest struct {
    Prompt         PaLMPrompt `json:"prompt"`
    Temperature    float64    `json:"temperature,omitempty"`
    CandidateCount int        `json:"candidateCount,omitempty"`
    TopP           float64    `json:"topP,omitempty"`
    TopK           int        `json:"topK,omitempty"`
}

type PaLMError struct {
    Code    int    `json:"code"`
    Message string `json:"message"`
    Status  string `json:"status"`
}

type PaLMChatResponse struct {
    Candidates []PaLMChatMessage `json:"candidates"`
    Messages   []openai.Message  `json:"messages"`
    Filters    []PaLMFilter      `json:"filters"`
    Error      PaLMError         `json:"error"`
}
@@ -1,6 +0,0 @@
package openai

const (
    ContentTypeText     = "text"
    ContentTypeImageURL = "image_url"
)
@@ -1,283 +0,0 @@
package openai

type Message struct {
	Role    string  `json:"role"`
	Content any     `json:"content"`
	Name    *string `json:"name,omitempty"`
}

type ImageURL struct {
	Url    string `json:"url,omitempty"`
	Detail string `json:"detail,omitempty"`
}

type TextContent struct {
	Type string `json:"type,omitempty"`
	Text string `json:"text,omitempty"`
}

type ImageContent struct {
	Type     string    `json:"type,omitempty"`
	ImageURL *ImageURL `json:"image_url,omitempty"`
}

type OpenAIMessageContent struct {
	Type     string    `json:"type,omitempty"`
	Text     string    `json:"text"`
	ImageURL *ImageURL `json:"image_url,omitempty"`
}

func (m Message) IsStringContent() bool {
	_, ok := m.Content.(string)
	return ok
}

func (m Message) StringContent() string {
	content, ok := m.Content.(string)
	if ok {
		return content
	}
	contentList, ok := m.Content.([]any)
	if ok {
		var contentStr string
		for _, contentItem := range contentList {
			contentMap, ok := contentItem.(map[string]any)
			if !ok {
				continue
			}
			if contentMap["type"] == ContentTypeText {
				if subStr, ok := contentMap["text"].(string); ok {
					contentStr += subStr
				}
			}
		}
		return contentStr
	}
	return ""
}

func (m Message) ParseContent() []OpenAIMessageContent {
	var contentList []OpenAIMessageContent
	content, ok := m.Content.(string)
	if ok {
		contentList = append(contentList, OpenAIMessageContent{
			Type: ContentTypeText,
			Text: content,
		})
		return contentList
	}
	anyList, ok := m.Content.([]any)
	if ok {
		for _, contentItem := range anyList {
			contentMap, ok := contentItem.(map[string]any)
			if !ok {
				continue
			}
			switch contentMap["type"] {
			case ContentTypeText:
				if subStr, ok := contentMap["text"].(string); ok {
					contentList = append(contentList, OpenAIMessageContent{
						Type: ContentTypeText,
						Text: subStr,
					})
				}
			case ContentTypeImageURL:
				if subObj, ok := contentMap["image_url"].(map[string]any); ok {
					// guard the inner assertion so a malformed payload cannot panic
					if url, ok := subObj["url"].(string); ok {
						contentList = append(contentList, OpenAIMessageContent{
							Type:     ContentTypeImageURL,
							ImageURL: &ImageURL{Url: url},
						})
					}
				}
			}
		}
		return contentList
	}
	return nil
}

type ResponseFormat struct {
	Type string `json:"type,omitempty"`
}

type GeneralOpenAIRequest struct {
	Model            string          `json:"model,omitempty"`
	Messages         []Message       `json:"messages,omitempty"`
	Prompt           any             `json:"prompt,omitempty"`
	Stream           bool            `json:"stream,omitempty"`
	MaxTokens        int             `json:"max_tokens,omitempty"`
	Temperature      float64         `json:"temperature,omitempty"`
	TopP             float64         `json:"top_p,omitempty"`
	N                int             `json:"n,omitempty"`
	Input            any             `json:"input,omitempty"`
	Instruction      string          `json:"instruction,omitempty"`
	Size             string          `json:"size,omitempty"`
	Functions        any             `json:"functions,omitempty"`
	FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
	PresencePenalty  float64         `json:"presence_penalty,omitempty"`
	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
	Seed             float64         `json:"seed,omitempty"`
	Tools            any             `json:"tools,omitempty"`
	ToolChoice       any             `json:"tool_choice,omitempty"`
	User             string          `json:"user,omitempty"`
}

func (r GeneralOpenAIRequest) ParseInput() []string {
	if r.Input == nil {
		return nil
	}
	var input []string
	switch v := r.Input.(type) {
	case string:
		input = []string{v}
	case []any:
		input = make([]string, 0, len(v))
		for _, item := range v {
			if str, ok := item.(string); ok {
				input = append(input, str)
			}
		}
	}
	return input
}

type ChatRequest struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"`
	MaxTokens int       `json:"max_tokens"`
}

type TextRequest struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"`
	Prompt    string    `json:"prompt"`
	MaxTokens int       `json:"max_tokens"`
	//Stream  bool      `json:"stream"`
}

// ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
type ImageRequest struct {
	Model          string `json:"model"`
	Prompt         string `json:"prompt" binding:"required"`
	N              int    `json:"n,omitempty"`
	Size           string `json:"size,omitempty"`
	Quality        string `json:"quality,omitempty"`
	ResponseFormat string `json:"response_format,omitempty"`
	Style          string `json:"style,omitempty"`
	User           string `json:"user,omitempty"`
}

type WhisperJSONResponse struct {
	Text string `json:"text,omitempty"`
}

type WhisperVerboseJSONResponse struct {
	Task     string    `json:"task,omitempty"`
	Language string    `json:"language,omitempty"`
	Duration float64   `json:"duration,omitempty"`
	Text     string    `json:"text,omitempty"`
	Segments []Segment `json:"segments,omitempty"`
}

type Segment struct {
	Id               int     `json:"id"`
	Seek             int     `json:"seek"`
	Start            float64 `json:"start"`
	End              float64 `json:"end"`
	Text             string  `json:"text"`
	Tokens           []int   `json:"tokens"`
	Temperature      float64 `json:"temperature"`
	AvgLogprob       float64 `json:"avg_logprob"`
	CompressionRatio float64 `json:"compression_ratio"`
	NoSpeechProb     float64 `json:"no_speech_prob"`
}

type TextToSpeechRequest struct {
	Model          string  `json:"model" binding:"required"`
	Input          string  `json:"input" binding:"required"`
	Voice          string  `json:"voice" binding:"required"`
	Speed          float64 `json:"speed"`
	ResponseFormat string  `json:"response_format"`
}

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type Error struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Param   string `json:"param"`
	Code    any    `json:"code"`
}

type ErrorWithStatusCode struct {
	Error
	StatusCode int `json:"status_code"`
}

type SlimTextResponse struct {
	Choices []TextResponseChoice `json:"choices"`
	Usage   `json:"usage"`
	Error   Error `json:"error"`
}

type TextResponseChoice struct {
	Index        int `json:"index"`
	Message      `json:"message"`
	FinishReason string `json:"finish_reason"`
}

type TextResponse struct {
	Id      string               `json:"id"`
	Model   string               `json:"model,omitempty"`
	Object  string               `json:"object"`
	Created int64                `json:"created"`
	Choices []TextResponseChoice `json:"choices"`
	Usage   `json:"usage"`
}

type EmbeddingResponseItem struct {
	Object    string    `json:"object"`
	Index     int       `json:"index"`
	Embedding []float64 `json:"embedding"`
}

type EmbeddingResponse struct {
	Object string                  `json:"object"`
	Data   []EmbeddingResponseItem `json:"data"`
	Model  string                  `json:"model"`
	Usage  `json:"usage"`
}

type ImageResponse struct {
	Created int `json:"created"`
	Data    []struct {
		Url string `json:"url"`
	}
}

type ChatCompletionsStreamResponseChoice struct {
	Delta struct {
		Content string `json:"content"`
	} `json:"delta"`
	FinishReason *string `json:"finish_reason,omitempty"`
}

type ChatCompletionsStreamResponse struct {
	Id      string                                `json:"id"`
	Object  string                                `json:"object"`
	Created int64                                 `json:"created"`
	Model   string                                `json:"model"`
	Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
}

type CompletionsStreamResponse struct {
	Choices []struct {
		Text         string `json:"text"`
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
}
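As a quick sanity check of the accessors above, here is a sketch of how they behave on a multimodal message (the message literal is made-up test data, not from the repository):

```go
package openai

import "fmt"

func ExampleMessageContent() {
	m := Message{
		Role: "user",
		Content: []any{
			map[string]any{"type": "text", "text": "describe this image"},
			map[string]any{"type": "image_url", "image_url": map[string]any{"url": "https://example.com/cat.png"}},
		},
	}
	fmt.Println(m.IsStringContent())   // false: content is a list, not a plain string
	fmt.Println(m.StringContent())     // "describe this image": only text parts are concatenated
	fmt.Println(len(m.ParseContent())) // 2: one text part plus one image part
}
```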
@@ -1,13 +0,0 @@
package openai

func ErrorWrapper(err error, code string, statusCode int) *ErrorWithStatusCode {
	wrapped := Error{
		Message: err.Error(),
		Type:    "one_api_error",
		Code:    code,
	}
	return &ErrorWithStatusCode{
		Error:      wrapped,
		StatusCode: statusCode,
	}
}
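A typical call site wraps an upstream failure into the relay's error envelope; a small sketch (the error code string is illustrative):

```go
package openai

import (
	"errors"
	"fmt"
	"net/http"
)

func ExampleErrorWrapper() {
	e := ErrorWrapper(errors.New("connection refused"), "do_request_failed", http.StatusInternalServerError)
	fmt.Println(e.StatusCode, e.Error.Type, e.Error.Code) // 500 one_api_error do_request_failed
}
```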
@@ -1,63 +0,0 @@
package tencent

import (
	"one-api/relay/channel/openai"
)

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type ChatRequest struct {
	AppId    int64  `json:"app_id"`    // APPID of the Tencent Cloud account
	SecretId string `json:"secret_id"` // SecretId from the console
	// Timestamp is the current UNIX timestamp in seconds, recording when the API request was made.
	// For example, 1529223702; if it differs too much from the current time, a signature-expired error occurs.
	Timestamp int64 `json:"timestamp"`
	// Expired is the signature's expiry, a UNIX-epoch timestamp in seconds;
	// Expired must be greater than Timestamp, and Expired-Timestamp must be less than 90 days.
	Expired int64  `json:"expired"`
	QueryID string `json:"query_id"` // request id, used for troubleshooting
	// Temperature: higher values make the output more random, lower values make it more focused and deterministic.
	// Default 1.0, range [0.0, 2.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
	// Set either this or top_p, not both.
	Temperature float64 `json:"temperature"`
	// TopP controls output diversity: the larger the value, the more diverse the generated text.
	// Default 1.0, range [0.0, 1.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
	// Set either this or temperature, not both.
	TopP float64 `json:"top_p"`
	// Stream: 0 = synchronous, 1 = streaming (the default; protocol: SSE).
	// Synchronous requests time out after 60s; streaming is recommended for longer content.
	Stream int `json:"stream"`
	// Messages holds the conversation, at most 40 entries, ordered from oldest to newest.
	// The total input content supports at most 3000 tokens.
	Messages []Message `json:"messages"`
}

type Error struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type Usage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
	TotalTokens  int `json:"total_tokens"`
}

type ResponseChoices struct {
	FinishReason string  `json:"finish_reason,omitempty"` // streaming end flag; "stop" marks the final chunk
	Messages     Message `json:"messages,omitempty"`      // content in synchronous mode, null in streaming mode; output supports at most 1024 tokens
	Delta        Message `json:"delta,omitempty"`         // content in streaming mode, null in synchronous mode; output supports at most 1024 tokens
}

type ChatResponse struct {
	Choices []ResponseChoices `json:"choices,omitempty"` // results
	Created string            `json:"created,omitempty"` // UNIX timestamp as a string
	Id      string            `json:"id,omitempty"`      // conversation id
	Usage   openai.Usage      `json:"usage,omitempty"`   // token counts
	Error   Error             `json:"error,omitempty"`   // error info; note: this field may be null, meaning no valid value was obtained
	Note    string            `json:"note,omitempty"`    // remark
	ReqID   string            `json:"req_id,omitempty"`  // unique request id, returned with every request; include it when reporting issues
}
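A minimal sketch of how a caller could satisfy the signature-window constraints documented on `Timestamp` and `Expired` above (the one-day window and the helper itself are illustrative, not from the repository):

```go
package tencent

import "time"

// newChatRequest fills the signature window: Expired must be greater than
// Timestamp, and Expired-Timestamp must stay below 90 days.
func newChatRequest(appId int64, secretId string, messages []Message) ChatRequest {
	now := time.Now().Unix()
	return ChatRequest{
		AppId:     appId,
		SecretId:  secretId,
		Timestamp: now,
		Expired:   now + 24*3600, // one day, well under the 90-day ceiling
		Stream:    1,             // 1 = streaming over SSE, 0 = synchronous
		Messages:  messages,      // at most 40 entries, oldest first
	}
}
```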
@@ -1,61 +0,0 @@
package xunfei

import (
	"one-api/relay/channel/openai"
)

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type ChatRequest struct {
	Header struct {
		AppId string `json:"app_id"`
	} `json:"header"`
	Parameter struct {
		Chat struct {
			Domain      string  `json:"domain,omitempty"`
			Temperature float64 `json:"temperature,omitempty"`
			TopK        int     `json:"top_k,omitempty"`
			MaxTokens   int     `json:"max_tokens,omitempty"`
			Auditing    bool    `json:"auditing,omitempty"`
		} `json:"chat"`
	} `json:"parameter"`
	Payload struct {
		Message struct {
			Text []Message `json:"text"`
		} `json:"message"`
	} `json:"payload"`
}

type ChatResponseTextItem struct {
	Content string `json:"content"`
	Role    string `json:"role"`
	Index   int    `json:"index"`
}

type ChatResponse struct {
	Header struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
		Sid     string `json:"sid"`
		Status  int    `json:"status"`
	} `json:"header"`
	Payload struct {
		Choices struct {
			Status int                    `json:"status"`
			Seq    int                    `json:"seq"`
			Text   []ChatResponseTextItem `json:"text"`
		} `json:"choices"`
		Usage struct {
			//Text struct {
			//	QuestionTokens   string `json:"question_tokens"`
			//	PromptTokens     string `json:"prompt_tokens"`
			//	CompletionTokens string `json:"completion_tokens"`
			//	TotalTokens      string `json:"total_tokens"`
			//} `json:"text"`
			Text openai.Usage `json:"text"`
		} `json:"usage"`
	} `json:"payload"`
}
@@ -1,46 +0,0 @@
package zhipu

import (
	"one-api/relay/channel/openai"
	"time"
)

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type Request struct {
	Prompt      []Message `json:"prompt"`
	Temperature float64   `json:"temperature,omitempty"`
	TopP        float64   `json:"top_p,omitempty"`
	RequestId   string    `json:"request_id,omitempty"`
	Incremental bool      `json:"incremental,omitempty"`
}

type ResponseData struct {
	TaskId       string    `json:"task_id"`
	RequestId    string    `json:"request_id"`
	TaskStatus   string    `json:"task_status"`
	Choices      []Message `json:"choices"`
	openai.Usage `json:"usage"`
}

type Response struct {
	Code    int          `json:"code"`
	Msg     string       `json:"msg"`
	Success bool         `json:"success"`
	Data    ResponseData `json:"data"`
}

type StreamMetaResponse struct {
	RequestId    string `json:"request_id"`
	TaskId       string `json:"task_id"`
	TaskStatus   string `json:"task_status"`
	openai.Usage `json:"usage"`
}

type tokenData struct {
	Token      string
	ExpiryTime time.Time
}
@@ -1,16 +0,0 @@
package constant

const (
	RelayModeUnknown = iota
	RelayModeChatCompletions
	RelayModeCompletions
	RelayModeEmbeddings
	RelayModeModerations
	RelayModeImagesGenerations
	RelayModeEdits
	RelayModeAudioSpeech
	RelayModeAudioTranscription
	RelayModeAudioTranslation
)

var StopFinishReason = "stop"
@@ -1,166 +0,0 @@
package util

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"one-api/common"
	"one-api/model"
	"one-api/relay/channel/openai"
	"strconv"
	"strings"

	"github.com/gin-gonic/gin"
)

func ShouldDisableChannel(err *openai.Error, statusCode int) bool {
	if !common.AutomaticDisableChannelEnabled {
		return false
	}
	if err == nil {
		return false
	}
	if statusCode == http.StatusUnauthorized {
		return true
	}
	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
		return true
	}
	return false
}

func ShouldEnableChannel(err error, openAIErr *openai.Error) bool {
	if !common.AutomaticEnableChannelEnabled {
		return false
	}
	if err != nil {
		return false
	}
	if openAIErr != nil {
		return false
	}
	return true
}

type GeneralErrorResponse struct {
	Error    openai.Error `json:"error"`
	Message  string       `json:"message"`
	Msg      string       `json:"msg"`
	Err      string       `json:"err"`
	ErrorMsg string       `json:"error_msg"`
	Header   struct {
		Message string `json:"message"`
	} `json:"header"`
	Response struct {
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	} `json:"response"`
}

func (e GeneralErrorResponse) ToMessage() string {
	if e.Error.Message != "" {
		return e.Error.Message
	}
	if e.Message != "" {
		return e.Message
	}
	if e.Msg != "" {
		return e.Msg
	}
	if e.Err != "" {
		return e.Err
	}
	if e.ErrorMsg != "" {
		return e.ErrorMsg
	}
	if e.Header.Message != "" {
		return e.Header.Message
	}
	if e.Response.Error.Message != "" {
		return e.Response.Error.Message
	}
	return ""
}

func RelayErrorHandler(resp *http.Response) (ErrorWithStatusCode *openai.ErrorWithStatusCode) {
	ErrorWithStatusCode = &openai.ErrorWithStatusCode{
		StatusCode: resp.StatusCode,
		Error: openai.Error{
			Message: "",
			Type:    "upstream_error",
			Code:    "bad_response_status_code",
			Param:   strconv.Itoa(resp.StatusCode),
		},
	}
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}
	err = resp.Body.Close()
	if err != nil {
		return
	}
	var errResponse GeneralErrorResponse
	err = json.Unmarshal(responseBody, &errResponse)
	if err != nil {
		return
	}
	if errResponse.Error.Message != "" {
		// OpenAI format error, so we override the default one
		ErrorWithStatusCode.Error = errResponse.Error
	} else {
		ErrorWithStatusCode.Error.Message = errResponse.ToMessage()
	}
	if ErrorWithStatusCode.Error.Message == "" {
		ErrorWithStatusCode.Error.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
	}
	return
}

func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
		switch channelType {
		case common.ChannelTypeOpenAI:
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
		case common.ChannelTypeAzure:
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/openai/deployments"))
		}
	}
	return fullRequestURL
}

func PostConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
	// quotaDelta is the remaining quota to be consumed
	err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
	if err != nil {
		common.SysError("error consuming token remain quota: " + err.Error())
	}
	err = model.CacheUpdateUserQuota(userId)
	if err != nil {
		common.SysError("error updating user quota cache: " + err.Error())
	}
	// totalQuota is the total quota consumed
	if totalQuota != 0 {
		// user-facing log text kept in Chinese; it reads "model ratio %.2f, group ratio %.2f"
		logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
		model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
		model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
		model.UpdateChannelUsedQuota(channelId, totalQuota)
	}
	if totalQuota <= 0 {
		common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
	}
}

func GetAPIVersion(c *gin.Context) string {
	query := c.Request.URL.Query()
	apiVersion := query.Get("api-version")
	if apiVersion == "" {
		apiVersion = c.GetString("api_version")
	}
	return apiVersion
}
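For reference, the Cloudflare branch in `GetFullRequestURL` above only trims the provider-specific prefix; a sketch of the resulting mapping (the account/gateway IDs are placeholders):

```go
package util

import (
	"fmt"
	"one-api/common"
)

func ExampleGetFullRequestURL() {
	base := "https://gateway.ai.cloudflare.com/v1/account/gateway/openai"
	fmt.Println(GetFullRequestURL(base, "/v1/chat/completions", common.ChannelTypeOpenAI))
	// Output: https://gateway.ai.cloudflare.com/v1/account/gateway/openai/chat/completions
}
```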
@@ -1,24 +0,0 @@
package util

import (
	"net/http"
	"one-api/common"
	"time"
)

var HTTPClient *http.Client
var ImpatientHTTPClient *http.Client

func init() {
	if common.RelayTimeout == 0 {
		HTTPClient = &http.Client{}
	} else {
		HTTPClient = &http.Client{
			Timeout: time.Duration(common.RelayTimeout) * time.Second,
		}
	}

	ImpatientHTTPClient = &http.Client{
		Timeout: 5 * time.Second,
	}
}
@@ -35,7 +35,6 @@ func SetApiRouter(router *gin.Engine) {
		selfRoute := userRoute.Group("/")
		selfRoute.Use(middleware.UserAuth())
		{
			selfRoute.GET("/dashboard", controller.GetUserDashboard)
			selfRoute.GET("/self", controller.GetSelf)
			selfRoute.PUT("/self", controller.UpdateSelf)
			selfRoute.DELETE("/self", controller.DeleteSelf)
@@ -10,7 +10,7 @@ import (
	"strings"
)

func SetRouter(router *gin.Engine, buildFS embed.FS) {
func SetRouter(router *gin.Engine, buildFS embed.FS, indexPage []byte) {
	SetApiRouter(router)
	SetDashboardRouter(router)
	SetRelayRouter(router)
@@ -20,7 +20,7 @@ func SetRouter(router *gin.Engine, buildFS embed.FS) {
		common.SysLog("FRONTEND_BASE_URL is ignored on master node")
	}
	if frontendBaseUrl == "" {
		SetWebRouter(router, buildFS)
		SetWebRouter(router, buildFS, indexPage)
	} else {
		frontendBaseUrl = strings.TrimSuffix(frontendBaseUrl, "/")
		router.NoRoute(func(c *gin.Context) {
@@ -2,7 +2,6 @@ package router

import (
	"embed"
	"fmt"
	"github.com/gin-contrib/gzip"
	"github.com/gin-contrib/static"
	"github.com/gin-gonic/gin"
@@ -13,18 +12,17 @@ import (
	"strings"
)

func SetWebRouter(router *gin.Engine, buildFS embed.FS) {
	indexPageData, _ := buildFS.ReadFile(fmt.Sprintf("web/build/%s/index.html", common.Theme))
func SetWebRouter(router *gin.Engine, buildFS embed.FS, indexPage []byte) {
	router.Use(gzip.Gzip(gzip.DefaultCompression))
	router.Use(middleware.GlobalWebRateLimit())
	router.Use(middleware.Cache())
	router.Use(static.Serve("/", common.EmbedFolder(buildFS, fmt.Sprintf("web/build/%s", common.Theme))))
	router.Use(static.Serve("/", common.EmbedFolder(buildFS, "web/build")))
	router.NoRoute(func(c *gin.Context) {
		if strings.HasPrefix(c.Request.RequestURI, "/v1") || strings.HasPrefix(c.Request.RequestURI, "/api") {
			controller.RelayNotFound(c)
			return
		}
		c.Header("Cache-Control", "no-cache")
		c.Data(http.StatusOK, "text/html; charset=utf-8", indexPageData)
		c.Data(http.StatusOK, "text/html; charset=utf-8", indexPage)
	})
}
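Since `SetWebRouter` no longer reads `index.html` itself, the caller now loads it once and passes the bytes down. `main.go` is not part of this diff, so the call site below is an assumption:

```go
package main

import (
	"embed"

	"github.com/gin-gonic/gin"
	"one-api/router"
)

//go:embed web/build
var buildFS embed.FS // hypothetical embed matching the new single-build layout

func main() {
	server := gin.New()
	// Read the embedded index page once and hand it to the router,
	// matching the new SetRouter/SetWebRouter signatures above.
	indexPage, _ := buildFS.ReadFile("web/build/index.html")
	router.SetRouter(server, buildFS, indexPage)
	_ = server.Run(":3000") // port is illustrative
}
```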
0
web/berry/.gitignore → web/.gitignore
vendored
@@ -1,38 +1,21 @@
# One API front-end interface
# React Template

> Each folder corresponds to one theme; new theme submissions are welcome.
## Basic Usages

## Submitting a new theme

> You are welcome to keep your own and One API's copyright notice, with links, at the bottom of the page.

```shell
# Runs the app in the development mode
npm start

# Builds the app for production to the `build` folder
npm run build
```

1. Create a new folder under `web`, named after your theme.
2. Put your theme files into that folder.
3. In your `package.json`, change the `build` command to: `"build": "react-scripts build && mv -f build ../build/default"`, where `default` is your theme name.
4. Register your theme name in `ValidThemes` in `common/constants.go`.
5. Update the `web/THEMES` file to match.
If you want to change the default server, please set the `REACT_APP_SERVER` environment variable before building,
for example: `REACT_APP_SERVER=http://your.domain.com`.

## Theme list
Before you start editing, make sure your `Actions on Save` options have `Optimize imports` & `Run Prettier` enabled.

### Theme: default
## Reference

The default theme, developed by [JustSong](https://github.com/songquanpeng).

Preview: (screenshots omitted)

### Theme: berry

Developed by [MartialBE](https://github.com/MartialBE).

Preview: (screenshots omitted)

#### Development notes

See [web/berry/README.md](https://github.com/songquanpeng/one-api/tree/main/web/berry/README.md)
1. https://github.com/OIerDb-ng/OIerDb
2. https://github.com/cornflourblue/react-hooks-redux-registration-login-example
@@ -1,2 +0,0 @@
default
berry
@@ -1,61 +0,0 @@
# One API front-end

This project is the front-end of One API; it is developed on top of the [Berry Free React Admin Template](https://github.com/codedthemes/berry-free-react-admin-template).

## Open-source projects used

The following open-source projects are used as part of this project:

- [Berry Free React Admin Template](https://github.com/codedthemes/berry-free-react-admin-template)
- [minimal-ui-kit](minimal-ui-kit)

## Development notes

When adding a new channel, the following places need to be updated:

1. `web/berry/src/constants/ChannelConstants.js`

Add the new channel to `CHANNEL_OPTIONS` in this file:

```js
export const CHANNEL_OPTIONS = {
  // the key is the channel ID
  1: {
    key: 1, // channel ID
    text: "OpenAI", // channel name
    value: 1, // channel ID
    color: "primary", // color used in the channel list
  },
};
```

2. `web/berry/src/views/Channel/type/Config.js`

Add the new channel's configuration to `typeConfig` in this file; if no configuration is needed, it can be omitted:

```js
const typeConfig = {
  // the key is the channel ID
  3: {
    inputLabel: {
      // input label configuration, keyed by field name
      base_url: "AZURE_OPENAI_ENDPOINT",
      other: "default API version",
    },
    prompt: {
      // input placeholder configuration, keyed by field name
      base_url: "Please enter AZURE_OPENAI_ENDPOINT",

      // note: whether the `other` input is shown depends on whether `other` has a value; by default it has none
      other: "Please enter the default API version, e.g. 2023-06-01-preview",
    },
    modelGroup: "openai", // model group name, used by the "fill in supported models" button to fetch the model group; defaults to openai if unset
  },
};
```

## License

The code in this project is released under the MIT license.
@@ -1,9 +0,0 @@
{
  "compilerOptions": {
    "target": "esnext",
    "module": "commonjs",
    "baseUrl": "src"
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules"]
}
@@ -1,84 +0,0 @@
{
  "name": "one_api_web",
  "version": "1.0.0",
  "proxy": "http://127.0.0.1:3000",
  "private": true,
  "homepage": "",
  "dependencies": {
    "@emotion/cache": "^11.9.3",
    "@emotion/react": "^11.9.3",
    "@emotion/styled": "^11.9.3",
    "@mui/icons-material": "^5.8.4",
    "@mui/lab": "^5.0.0-alpha.88",
    "@mui/material": "^5.8.6",
    "@mui/system": "^5.8.6",
    "@mui/utils": "^5.8.6",
    "@mui/x-date-pickers": "^6.18.5",
    "@tabler/icons-react": "^2.44.0",
    "apexcharts": "^3.35.3",
    "axios": "^0.27.2",
    "dayjs": "^1.11.10",
    "formik": "^2.2.9",
    "framer-motion": "^6.3.16",
    "history": "^5.3.0",
    "marked": "^4.1.1",
    "material-ui-popup-state": "^4.0.1",
    "notistack": "^3.0.1",
    "prop-types": "^15.8.1",
    "react": "^18.2.0",
    "react-apexcharts": "^1.4.0",
    "react-device-detect": "^2.2.2",
    "react-dom": "^18.2.0",
    "react-perfect-scrollbar": "^1.5.8",
    "react-redux": "^8.0.2",
    "react-router": "6.3.0",
    "react-router-dom": "6.3.0",
    "react-scripts": "^5.0.1",
    "react-turnstile": "^1.1.2",
    "redux": "^4.2.0",
    "yup": "^0.32.11"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build && mv -f build ../build/berry",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app"
    ]
  },
  "babel": {
    "presets": [
      "@babel/preset-react"
    ]
  },
  "browserslist": {
    "production": [
      "defaults",
      "not IE 11"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  },
  "devDependencies": {
    "@babel/core": "^7.21.4",
    "@babel/eslint-parser": "^7.21.3",
    "eslint": "^8.38.0",
    "eslint-config-prettier": "^8.8.0",
    "eslint-config-react-app": "^7.0.1",
    "eslint-plugin-flowtype": "^8.0.3",
    "eslint-plugin-import": "^2.27.5",
    "eslint-plugin-jsx-a11y": "^6.7.1",
    "eslint-plugin-prettier": "^4.2.1",
    "eslint-plugin-react": "^7.32.2",
    "eslint-plugin-react-hooks": "^4.6.0",
    "immutable": "^4.3.0",
    "prettier": "^2.8.7",
    "sass": "^1.53.0"
  }
}
Before Width: | Height: | Size: 40 KiB |
@@ -1,26 +0,0 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <title>One API</title>
    <link rel="icon" href="/favicon.ico" />
    <!-- Meta Tags -->
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#2296f3" />
    <meta
      name="description"
      content="OpenAI API aggregation and management, supporting multiple channels including Azure; usable for secondary key distribution; a single executable with a prebuilt Docker image; one-click deployment, ready out of the box"
    />
    <link rel="preconnect" href="https://fonts.gstatic.com" />
    <link
      href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Poppins:wght@400;500;600;700&family=Roboto:wght@400;500;700&display=swap"
      rel="stylesheet"
    />
  </head>

  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
  </body>
</html>
@@ -1,43 +0,0 @@
import { useSelector } from 'react-redux';

import { ThemeProvider } from '@mui/material/styles';
import { CssBaseline, StyledEngineProvider } from '@mui/material';

// routing
import Routes from 'routes';

// defaultTheme
import themes from 'themes';

// project imports
import NavigationScroll from 'layout/NavigationScroll';

// auth
import UserProvider from 'contexts/UserContext';
import StatusProvider from 'contexts/StatusContext';
import { SnackbarProvider } from 'notistack';

// ==============================|| APP ||============================== //

const App = () => {
  const customization = useSelector((state) => state.customization);

  return (
    <StyledEngineProvider injectFirst>
      <ThemeProvider theme={themes(customization)}>
        <CssBaseline />
        <NavigationScroll>
          <SnackbarProvider autoHideDuration={5000} maxSnack={3} anchorOrigin={{ vertical: 'top', horizontal: 'right' }}>
            <UserProvider>
              <StatusProvider>
                <Routes />
              </StatusProvider>
            </UserProvider>
          </SnackbarProvider>
        </NavigationScroll>
      </ThemeProvider>
    </StyledEngineProvider>
  );
};

export default App;
@@ -1,40 +0,0 @@
<svg width="480" height="360" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd"
d="M0 198.781c0 41.457 14.945 79.235 39.539 107.785 28.214 32.765 69.128 53.365 114.734 53.434a148.458 148.458 0 0056.495-11.036c9.051-3.699 19.182-3.274 27.948 1.107a75.774 75.774 0 0033.957 8.011c5.023 0 9.942-.495 14.7-1.434 13.581-2.67 25.94-8.99 36.089-17.94 6.379-5.627 14.548-8.456 22.898-8.446h.142c27.589 0 53.215-8.732 74.492-23.696 19.021-13.36 34.554-31.696 44.904-53.225C474.92 234.581 480 213.388 480 190.958c0-76.931-59.774-139.305-133.498-139.305-7.516 0-14.88.663-22.063 1.899C305.418 21.42 271.355 0 232.498 0a103.647 103.647 0 00-45.879 10.661c-13.24 6.487-25.011 15.705-34.641 26.939-32.697.544-62.93 11.69-87.675 30.291C25.351 97.155 0 144.882 0 198.781z"
fill="url(#prefix__paint0_linear)" opacity=".2" />
<g filter="url(#prefix__filter0_d)">
<circle opacity=".15" cx="182.109" cy="97.623" r="44.623" fill="#FFC107" />
<circle cx="182.109" cy="97.623" r="23.406" fill="url(#prefix__paint1_linear)" />
<path fill-rule="evenodd" clip-rule="evenodd"
d="M244.878 306.611c34.56 0 62.575-28.016 62.575-62.575 0-34.56-28.015-62.576-62.575-62.576-34.559 0-62.575 28.016-62.575 62.576 0 34.559 28.016 62.575 62.575 62.575zm0-23.186c21.754 0 39.389-17.635 39.389-39.389 0-21.755-17.635-39.39-39.389-39.39s-39.389 17.635-39.389 39.39c0 21.754 17.635 39.389 39.389 39.389z"
fill="#061B64" />
<path fill-rule="evenodd" clip-rule="evenodd"
d="M174.965 264.592c0-4.133-1.492-5.625-5.637-5.625h-11.373v-66.611c0-4.476-1.492-5.637-5.638-5.637h-9.172a9.866 9.866 0 00-7.948 3.974l-55.03 68.274a11.006 11.006 0 00-1.957 6.787v5.968c0 4.145 1.492 5.637 5.625 5.637h54.676v21.707c0 4.133 1.492 5.625 5.625 5.625h8.12c4.146 0 5.638-1.492 5.638-5.625v-21.707h11.434c4.414 0 5.637-1.492 5.637-5.637v-7.13zm-72.42-5.625l35.966-44.415v44.415h-35.966zM411.607 264.592c0-4.133-1.492-5.625-5.638-5.625h-11.422v-66.611c0-4.476-1.492-5.637-5.637-5.637h-9.111a9.87 9.87 0 00-7.949 3.974l-55.03 68.274a11.011 11.011 0 00-1.981 6.787v5.968c0 4.145 1.492 5.637 5.626 5.637h54.687v21.707c0 4.133 1.492 5.625 5.626 5.625h8.12c4.145 0 5.637-1.492 5.637-5.625v-21.707h11.434c4.476 0 5.638-1.492 5.638-5.637v-7.13zm-72.42-5.625l35.965-44.415v44.415h-35.965z"
fill="#2065D1" />
<path opacity=".24"
d="M425.621 117.222a8.267 8.267 0 00-9.599-8.157 11.129 11.129 0 00-9.784-5.87h-.403a13.23 13.23 0 00-20.365-14.078 13.23 13.23 0 00-5.316 14.078h-.403a11.153 11.153 0 100 22.293h38.68v-.073a8.279 8.279 0 007.19-8.193zM104.258 199.045a7.093 7.093 0 00-7.093-7.092c-.381.007-.761.039-1.138.097a9.552 9.552 0 00-8.425-5.026h-.343a11.348 11.348 0 10-22.012 0h-.342a9.564 9.564 0 100 19.114h33.177v-.061a7.107 7.107 0 006.176-7.032z"
fill="#2065D1" />
</g>
<defs>
<linearGradient id="prefix__paint0_linear" x1="328.81" y1="424.032" x2="505.393" y2="26.048"
gradientUnits="userSpaceOnUse">
<stop stop-color="#2065D1" />
<stop offset="1" stop-color="#2065D1" stop-opacity=".01" />
</linearGradient>
<linearGradient id="prefix__paint1_linear" x1="135.297" y1="97.623" x2="182.109" y2="144.436"
gradientUnits="userSpaceOnUse">
<stop stop-color="#FFE16A" />
<stop offset="1" stop-color="#B78103" />
</linearGradient>
<filter id="prefix__filter0_d" x="51" y="49" width="394.621" height="277.611" filterUnits="userSpaceOnUse"
color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix" />
<feColorMatrix in="SourceAlpha" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" />
<feOffset dx="8" dy="8" />
<feGaussianBlur stdDeviation="6" />
<feColorMatrix values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.12 0" />
<feBlend in2="BackgroundImageFix" result="effect1_dropShadow" />
<feBlend in="SourceGraphic" in2="effect1_dropShadow" result="shape" />
</filter>
</defs>
</svg>
Before Width: | Height: | Size: 3.9 KiB |
Before Width: | Height: | Size: 16 KiB |
@@ -1,39 +0,0 @@
<svg width="670" height="903" viewBox="0 0 670 903" fill="none" xmlns="http://www.w3.org/2000/svg">
<mask id="mask0" mask-type="alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="670" height="903">
<g opacity="0.2">
<path d="M0 0H670V903H0V0Z" fill="white"/>
</g>
</mask>
<g mask="url(#mask0)">
<path d="M2030.91 374.849L426.331 1300.78" stroke="#8492C4"/>
<path d="M426.409 -527.071L2030.72 399.311" stroke="#8492C4"/>
<path d="M1919.22 310.39L314.731 1236.47" stroke="#8492C4"/>
<path d="M314.731 -462.612L1919.22 463.467" stroke="#8492C4"/>
<path d="M1807.54 245.932L203.055 1172.01" stroke="#8492C4"/>
<path d="M203.052 -398.154L1807.54 527.925" stroke="#8492C4"/>
<path d="M1695.87 181.473L91.3788 1107.55" stroke="#8492C4"/>
<path d="M91.3744 -333.695L1695.86 592.384" stroke="#8492C4"/>
<path d="M1584.19 117.014L-20.3012 1043.09" stroke="#8492C4"/>
<path d="M-20.3044 -269.237L1584.19 656.843" stroke="#8492C4"/>
<path d="M1472.51 52.5562L-131.98 978.636" stroke="#8492C4"/>
<path d="M-131.983 -204.778L1472.51 721.301" stroke="#8492C4"/>
<path d="M1360.83 -11.9023L-243.658 914.177" stroke="#8492C4"/>
<path d="M-243.662 -140.319L1360.83 785.76" stroke="#8492C4"/>
<path d="M1249.15 -76.3613L-355.336 849.718" stroke="#8492C4"/>
<path d="M-355.341 -75.8608L1249.15 850.219" stroke="#8492C4"/>
<path d="M1137.48 -140.819L-467.014 785.26" stroke="#8492C4"/>
<path d="M-467.017 -11.4023L1137.47 914.677" stroke="#8492C4"/>
<path d="M1025.8 -205.278L-578.692 720.801" stroke="#8492C4"/>
<path d="M-578.693 53.0562L1025.8 979.136" stroke="#8492C4"/>
<path d="M914.119 -269.736L-690.371 656.343" stroke="#8492C4"/>
<path d="M-690.379 117.515L914.111 1043.59" stroke="#8492C4"/>
<path d="M802.441 -334.195L-802.052 591.887" stroke="#8492C4"/>
<path d="M-802.055 181.974L802.435 1108.05" stroke="#8492C4"/>
<path d="M690.762 -398.654L-913.728 527.426" stroke="#8492C4"/>
<path d="M-913.731 246.432L690.759 1172.51" stroke="#8492C4"/>
<path d="M579.084 -463.112L-1025.41 462.967" stroke="#8492C4"/>
<path d="M-1025.41 310.891L579.083 1236.97" stroke="#8492C4"/>
<path d="M467.406 -527.571L-1136.91 398.811" stroke="#8492C4"/>
<path d="M-1137.09 375.35L467.397 1301.43" stroke="#8492C4"/>
</g>
</svg>
Before Width: | Height: | Size: 2.2 KiB |
@@ -1,39 +0,0 @@
<svg width="670" height="903" viewBox="0 0 670 903" fill="none" xmlns="http://www.w3.org/2000/svg">
<mask id="mask0" mask-type="alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="670" height="903">
<g opacity="0.2">
<path d="M0 0H670V903H0V0Z" fill="white"/>
</g>
</mask>
<g mask="url(#mask0)">
<path d="M2030.91 374.849L426.331 1300.78" stroke="rgba(0,0,0,0.30)"/>
<path d="M426.409 -527.071L2030.72 399.311" stroke="rgba(0,0,0,0.30)"/>
<path d="M1919.22 310.39L314.731 1236.47" stroke="rgba(0,0,0,0.30)"/>
<path d="M314.731 -462.612L1919.22 463.467" stroke="rgba(0,0,0,0.30)"/>
<path d="M1807.54 245.932L203.055 1172.01" stroke="rgba(0,0,0,0.30)"/>
<path d="M203.052 -398.154L1807.54 527.925" stroke="rgba(0,0,0,0.30)"/>
<path d="M1695.87 181.473L91.3788 1107.55" stroke="rgba(0,0,0,0.30)"/>
<path d="M91.3744 -333.695L1695.86 592.384" stroke="rgba(0,0,0,0.30)"/>
<path d="M1584.19 117.014L-20.3012 1043.09" stroke="rgba(0,0,0,0.30)"/>
<path d="M-20.3044 -269.237L1584.19 656.843" stroke="rgba(0,0,0,0.30)"/>
<path d="M1472.51 52.5562L-131.98 978.636" stroke="rgba(0,0,0,0.30)"/>
<path d="M-131.983 -204.778L1472.51 721.301" stroke="rgba(0,0,0,0.30)"/>
<path d="M1360.83 -11.9023L-243.658 914.177" stroke="rgba(0,0,0,0.30)"/>
<path d="M-243.662 -140.319L1360.83 785.76" stroke="rgba(0,0,0,0.30)"/>
<path d="M1249.15 -76.3613L-355.336 849.718" stroke="rgba(0,0,0,0.30)"/>
<path d="M-355.341 -75.8608L1249.15 850.219" stroke="rgba(0,0,0,0.30)"/>
<path d="M1137.48 -140.819L-467.014 785.26" stroke="rgba(0,0,0,0.30)"/>
<path d="M-467.017 -11.4023L1137.47 914.677" stroke="rgba(0,0,0,0.30)"/>
<path d="M1025.8 -205.278L-578.692 720.801" stroke="rgba(0,0,0,0.30)"/>
<path d="M-578.693 53.0562L1025.8 979.136" stroke="rgba(0,0,0,0.30)"/>
<path d="M914.119 -269.736L-690.371 656.343" stroke="rgba(0,0,0,0.30)"/>
<path d="M-690.379 117.515L914.111 1043.59" stroke="rgba(0,0,0,0.30)"/>
<path d="M802.441 -334.195L-802.052 591.887" stroke="rgba(0,0,0,0.30)"/>
<path d="M-802.055 181.974L802.435 1108.05" stroke="rgba(0,0,0,0.30)"/>
<path d="M690.762 -398.654L-913.728 527.426" stroke="rgba(0,0,0,0.30)"/>
<path d="M-913.731 246.432L690.759 1172.51" stroke="rgba(0,0,0,0.30)"/>
<path d="M579.084 -463.112L-1025.41 462.967" stroke="rgba(0,0,0,0.30)"/>
<path d="M-1025.41 310.891L579.083 1236.97" stroke="rgba(0,0,0,0.30)"/>
<path d="M467.406 -527.571L-1136.91 398.811" stroke="rgba(0,0,0,0.30)"/>
<path d="M-1137.09 375.35L467.397 1301.43" stroke="rgba(0,0,0,0.30)"/>
</g>
</svg>
Before Width: | Height: | Size: 2.4 KiB |
Before Width: | Height: | Size: 22 KiB |
Before Width: | Height: | Size: 272 KiB |
Before Width: | Height: | Size: 15 KiB |
@@ -1,5 +0,0 @@
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M19 9H9C7.89543 9 7 9.89543 7 11V17C7 18.1046 7.89543 19 9 19H19C20.1046 19 21 18.1046 21 17V11C21 9.89543 20.1046 9 19 9Z" stroke="white" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M14 16C15.1046 16 16 15.1046 16 14C16 12.8954 15.1046 12 14 12C12.8954 12 12 12.8954 12 14C12 15.1046 12.8954 16 14 16Z" fill="#90CAF9"/>
<path d="M17 9V7C17 6.46957 16.7893 5.96086 16.4142 5.58579C16.0391 5.21071 15.5304 5 15 5H5C4.46957 5 3.96086 5.21071 3.58579 5.58579C3.21071 5.96086 3 6.46957 3 7V13C3 13.5304 3.21071 14.0391 3.58579 14.4142C3.96086 14.7893 4.46957 15 5 15H7" stroke="white" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
Before Width: | Height: | Size: 794 B |
@@ -1 +0,0 @@
<svg t="1702350903010" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4215" width="32" height="32"><path d="M512 85.333333C276.266667 85.333333 85.333333 276.266667 85.333333 512a426.410667 426.410667 0 0 0 291.754667 404.821333c21.333333 3.712 29.312-9.088 29.312-20.309333 0-10.112-0.554667-43.690667-0.554667-79.445333-107.178667 19.754667-134.912-26.112-143.445333-50.133334-4.821333-12.288-25.6-50.133333-43.733333-60.288-14.933333-7.978667-36.266667-27.733333-0.554667-28.245333 33.621333-0.554667 57.6 30.933333 65.621333 43.733333 38.4 64.512 99.754667 46.378667 124.245334 35.2 3.754667-27.733333 14.933333-46.378667 27.221333-57.045333-94.933333-10.666667-194.133333-47.488-194.133333-210.688 0-46.421333 16.512-84.778667 43.733333-114.688-4.266667-10.666667-19.2-54.4 4.266667-113.066667 0 0 35.712-11.178667 117.333333 43.776a395.946667 395.946667 0 0 1 106.666667-14.421333c36.266667 0 72.533333 4.778667 106.666666 14.378667 81.578667-55.466667 117.333333-43.690667 117.333334-43.690667 23.466667 58.666667 8.533333 102.4 4.266666 113.066667 27.178667 29.866667 43.733333 67.712 43.733334 114.645333 0 163.754667-99.712 200.021333-194.645334 210.688 15.445333 13.312 28.8 38.912 28.8 78.933333 0 57.045333-0.554667 102.912-0.554666 117.333334 0 11.178667 8.021333 24.490667 29.354666 20.224A427.349333 427.349333 0 0 0 938.666667 512c0-235.733333-190.933333-426.666667-426.666667-426.666667z" fill="#000000" p-id="4216"></path></svg>
Before Width: | Height: | Size: 1.5 KiB |
@@ -1 +0,0 @@
<svg height="62" viewBox="0 0 144 62" width="144" xmlns="http://www.w3.org/2000/svg"><path d="m111.34 23.88c-10.62-10.46-18.5-23.88-38.74-23.88h-1.2c-20.24 0-28.12 13.42-38.74 23.88-7.72 9.64-19.44 11.74-32.66 12.12v26h144v-26c-13.22-.38-24.94-2.48-32.66-12.12z" fill="#fff" fill-rule="evenodd"/></svg>
Before Width: | Height: | Size: 302 B |
@@ -1,6 +0,0 @@
<svg width="22" height="22" viewBox="0 0 22 22" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.06129 13.2253L4.31871 15.9975L1.60458 16.0549C0.793457 14.5504 0.333374 12.8292 0.333374 11C0.333374 9.23119 0.763541 7.56319 1.52604 6.09448H1.52662L3.94296 6.53748L5.00146 8.93932C4.77992 9.58519 4.65917 10.2785 4.65917 11C4.65925 11.783 4.80108 12.5332 5.06129 13.2253Z" fill="#FBBB00"/>
<path d="M21.4804 9.00732C21.6029 9.65257 21.6668 10.3189 21.6668 11C21.6668 11.7637 21.5865 12.5086 21.4335 13.2271C20.9143 15.6722 19.5575 17.8073 17.678 19.3182L17.6774 19.3177L14.6339 19.1624L14.2031 16.4734C15.4503 15.742 16.425 14.5974 16.9384 13.2271H11.2346V9.00732H17.0216H21.4804Z" fill="#518EF8"/>
<path d="M17.6772 19.3176L17.6777 19.3182C15.8498 20.7875 13.5277 21.6666 11 21.6666C6.93783 21.6666 3.40612 19.3962 1.60449 16.0549L5.0612 13.2253C5.96199 15.6294 8.28112 17.3408 11 17.3408C12.1686 17.3408 13.2634 17.0249 14.2029 16.4734L17.6772 19.3176Z" fill="#28B446"/>
<path d="M17.8085 2.78892L14.353 5.61792C13.3807 5.01017 12.2313 4.65908 11 4.65908C8.21963 4.65908 5.85713 6.44896 5.00146 8.93925L1.52658 6.09442H1.526C3.30125 2.67171 6.8775 0.333252 11 0.333252C13.5881 0.333252 15.9612 1.25517 17.8085 2.78892Z" fill="#F14336"/>
</svg>
Before Width: | Height: | Size: 1.2 KiB |
@@ -1 +0,0 @@
<svg t="1702350975929" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="2474" width="32" height="32"><path d="M512 1024C229.23264 1024 0 794.76736 0 512S229.23264 0 512 0s512 229.23264 512 512-229.23264 512-512 512z m212.0704-290.0992l41.5744 24.064c9.216 5.3248 14.5408 1.26976 11.83744-9.03168l-9.0112-34.4064a9.76896 9.76896 0 0 1 3.76832-10.4448C813.50656 674.75456 839.68 631.0912 839.68 582.32832c0-88.28928-85.79072-159.86688-191.61088-159.86688s-191.61088 71.5776-191.61088 159.86688c0 88.2688 85.79072 159.86688 191.61088 159.86688 22.9376 0 44.91264-3.35872 65.26976-9.5232a13.55776 13.55776 0 0 1 10.73152 1.2288z m-366.63296-116.08064a271.85152 271.85152 0 0 0 89.98912 10.89536 146.61632 146.61632 0 0 1-7.7824-47.16544c0-96.31744 94.33088-174.3872 210.71872-174.3872 4.07552 0 8.11008 0.08192 12.12416 0.28672C645.2224 315.84256 549.96992 245.76 435.03616 245.76 307.87584 245.76 204.8 331.55072 204.8 437.37088c0 58.1632 31.15008 110.2848 80.32256 145.42848 4.62848 3.31776 6.7584 9.13408 5.28384 14.62272l-10.83392 40.38656c-3.2768 12.1856 2.99008 16.95744 13.88544 10.58816l49.29536-28.79488a18.59584 18.59584 0 0 1 14.68416-1.78176z m353.73056-60.74368a26.0096 26.0096 0 1 1 0-52.0192 26.0096 26.0096 0 0 1 0 52.0192z m-126.976 0a26.0096 26.0096 0 1 1 0-52.0192 26.0096 26.0096 0 0 1 0 52.0192z m-72.66304-150.69184a30.59712 30.59712 0 1 1 0-61.19424 30.59712 30.59712 0 0 1 0 61.19424z m-153.74336 0a30.59712 30.59712 0 1 1 0-61.19424 30.59712 30.59712 0 0 1 0 61.19424z" fill="#2BD418" p-id="2475"></path></svg>
Before Width: | Height: | Size: 1.5 KiB |
Before Width: | Height: | Size: 86 KiB |
Before Width: | Height: | Size: 174 KiB |
@@ -1,15 +0,0 @@
<svg viewBox="0 0 590 265" xmlns="http://www.w3.org/2000/svg" version="1.1">

<g>
<title>Layer 1</title>
<ellipse transform="rotate(103.6 287.6 32.87)" id="svg_1" ry="28.14" rx="28.47" cy="32.87" cx="287.60001" fill="#0d161f"/>
<path id="svg_2" d="m232.92,128.89c3.78,27.29 -1.81,55.44 -17.71,78.09a2.62,2.62 0 0 0 -0.06,2.92c1.24,1.92 2.96,5.05 5.56,4.94q5.25,-0.22 10.79,0.11a1.26,1.26 0 0 1 1.19,1.27l-0.4,42.53a1.31,1.31 0 0 1 -1.31,1.3q-16.77,-0.09 -36.53,0.01q-2.25,0.02 -3.71,-1.56q-16.02,-17.28 -31.98,-35.32c-5.13,-5.8 -10.18,-11.16 -14.86,-17.59a1.35,1.34 -31.1 0 1 0.5,-2q12.88,-6.32 22.13,-17.12q18.18,-21.23 15.08,-48.84q-2.66,-23.7 -22.4,-40.46q-23.43,-19.9 -54.88,-13.86c-4.1,0.79 -7.83,2.5 -11.72,4.12q-11.86,4.94 -20.59,14.64c-14.25,15.81 -20.07,36.4 -15.05,57.16q4.99,20.63 22.86,35.71c10.45,8.81 23.7,13.12 37.26,14.18q1.47,0.11 3.6,2.65c11.68,13.89 24.48,27.72 35.94,41.96a0.43,0.43 0 0 1 -0.21,0.68q-22.51,7.27 -47.37,5.37q-19.4,-1.47 -39.74,-11.22q-18.27,-8.75 -30.59,-21.28q-18.66,-18.98 -28.02,-43.57q-10.8,-28.4 -4.93,-58.67c1.59,-8.17 4.03,-17 7.42,-24.61q5.08,-11.38 11.61,-20.64q25.41,-36.03 68.45,-46.13q32.42,-7.61 64.23,3.92q25.31,9.17 43.2,27.31c16.85,17.09 28.91,40.01 32.24,64z" fill="#0d161f"/>
<path id="svg_3" d="m499.47,180.61c6.45,13.53 16.44,21.75 31.96,22q11.94,0.19 22.17,-5.36q2.21,-1.2 3.93,0.69q12.56,13.78 24.89,28.47q1.21,1.44 1.44,3.13a0.95,0.95 0 0 1 -0.36,0.89c-1.62,1.23 -3.33,2.71 -5.03,3.69q-29.37,17.01 -62.47,11.31c-20.61,-3.55 -39.05,-15.24 -51.47,-32.51q-6.4,-8.89 -9.91,-17.08c-2.62,-6.12 -4.73,-13.3 -5.41,-20.08q-3.96,-39.88 22.94,-67.74c9.48,-9.81 21.15,-16.67 34.39,-19.49c16.54,-3.53 34.64,-1.83 48.77,7.1q13.92,8.79 21.13,20.4q11.07,17.84 10.48,38.92c-0.02,0.94 -0.21,1.81 -0.85,2.54q-7.73,8.77 -18.71,20.16c-1.28,1.32 -2.61,2.26 -4.51,2.23q-24.45,-0.37 -51.64,-0.41q-5.03,0 -10.84,-0.22a0.96,0.95 -11.7 0 0 -0.9,1.36zm1.12,-37.17q-0.55,1.19 -0.63,2.34q-0.08,1.01 0.94,1.03q19.01,0.25 36.98,0.01q0.5,0 0.94,-0.22q0.57,-0.28 0.44,-0.9q-2.34,-11.6 -14.11,-15.25q-3.59,-1.11 -6.44,-0.57q-13.07,2.5 -18.12,13.56z" fill="#0d161f"/>
<path id="svg_4" d="m312.3,100.22a0.5,0.49 -22.1 0 0 0.84,0.35q2.76,-2.64 5.82,-4.31q8.45,-4.62 16.71,-6.57c15.81,-3.72 33.58,-3.2 48.2,3.95q24.49,11.98 35.05,35.76c4.66,10.5 5.44,22.96 5.5,35.35q0.21,49.99 -0.12,88q-0.03,3.06 -0.08,6.16a1.32,1.32 0 0 1 -1.33,1.3q-20.22,-0.18 -40.18,-0.23q-3.64,-0.01 -8.13,-0.44a1.06,1.05 -87.3 0 1 -0.95,-1.05q0.02,-45.49 -0.22,-92.99c-0.03,-6.25 -1.21,-13.88 -5.05,-18.95q-5.33,-7.03 -12.32,-10.18c-10.99,-4.93 -24.52,-1.84 -33.13,6.37q-10.01,9.53 -10.07,23.76q-0.11,25.46 -0.1,48.98c0,3.52 -0.06,8.31 -1.1,11.68c-4.37,14.04 -17.31,19.5 -31.04,16.77c-8.22,-1.64 -15.07,-7.75 -17.62,-15.62q-1.45,-4.49 -1.42,-10.2q0.3,-64.69 0.1,-129.86a0.47,0.47 0 0 1 0.47,-0.47l48.46,-0.35a1.56,1.55 89.4 0 1 1.56,1.54l0.15,11.25z" fill="#0d161f"/>
<path id="svg_5" d="m265.63,344.43a2.02,2.01 76.7 0 0 -1.85,-1.15l-17.03,0.24a2.25,2.22 9.3 0 0 -2.06,1.46l-2.86,7.84a2.47,2.46 -79.1 0 1 -2.38,1.62l-6.23,-0.19q-1.19,-0.04 -0.88,-1.19q1.38,-5.23 2.81,-8.7c3.41,-8.3 6.48,-16.83 10.12,-25.35q2.96,-6.93 5.21,-14.24c0.46,-1.52 1.69,-2.64 3.37,-2.63c2.02,0 4.68,-0.78 5.7,1.58q7.68,17.74 18.16,44.75q0.96,2.46 1.48,5a0.67,0.66 84.3 0 1 -0.65,0.8l-6.05,-0.02q-2.16,-0.01 -3.1,-1.96l-3.76,-7.86zm-16.73,-10.31a0.34,0.34 0 0 0 0.32,0.47l12.85,-0.36a0.34,0.34 0 0 0 0.3,-0.48l-6.84,-14.7a0.34,0.34 0 0 0 -0.62,0.02l-6.01,15.05z" fill="#0d161f"/>
<rect id="svg_6" rx="2.17" height="52.28" width="9.84" y="302.19" x="345.67" fill="#0d161f"/>
<path id="svg_7" d="m303.07,338.46l-0.15,14.42q-0.01,1.55 -1.56,1.52l-5.84,-0.12q-1.79,-0.04 -1.81,-1.83c-0.24,-15.33 -0.25,-30.89 -0.27,-47.22q-0.01,-2.99 2.55,-3.06q12.47,-0.33 20.15,0.8q8.61,1.25 12.86,9.17c2.95,5.49 2.53,13.5 -1.5,18.65c-5.57,7.14 -14.88,6.62 -23.24,6.51a1.17,1.17 0 0 0 -1.19,1.16zm-0.15,-24.81l0.16,12.72a1.72,1.72 0 0 0 1.74,1.7l6.07,-0.08a10.01,7.98 -0.7 0 0 9.91,-8.1l0,-0.2a10.01,7.98 -0.7 0 0 -10.11,-7.86l-6.07,0.08a1.72,1.72 0 0 0 -1.7,1.74z" fill="#0d161f"/>
<rect id="svg_8" rx="3.58" height="7.26" width="79.2" y="322.99" x="107" fill="#0d161f"/>
<rect id="svg_9" rx="3.81" height="7.72" width="79.1" y="322.78" x="417.27" fill="#0d161f"/>
</g>
</svg>
Before Width: | Height: | Size: 4.2 KiB |
@@ -1,13 +0,0 @@
<svg viewBox="0 0 590 360" xmlns="http://www.w3.org/2000/svg" version="1.1">
<g>
<ellipse transform="rotate(103.6 287.6 32.87)" id="svg_1" ry="28.14" rx="28.47" cy="32.87" cx="287.60001" fill="#0d161f"/>
<path id="svg_2" d="m232.92,128.89c3.78,27.29 -1.81,55.44 -17.71,78.09a2.62,2.62 0 0 0 -0.06,2.92c1.24,1.92 2.96,5.05 5.56,4.94q5.25,-0.22 10.79,0.11a1.26,1.26 0 0 1 1.19,1.27l-0.4,42.53a1.31,1.31 0 0 1 -1.31,1.3q-16.77,-0.09 -36.53,0.01q-2.25,0.02 -3.71,-1.56q-16.02,-17.28 -31.98,-35.32c-5.13,-5.8 -10.18,-11.16 -14.86,-17.59a1.35,1.34 -31.1 0 1 0.5,-2q12.88,-6.32 22.13,-17.12q18.18,-21.23 15.08,-48.84q-2.66,-23.7 -22.4,-40.46q-23.43,-19.9 -54.88,-13.86c-4.1,0.79 -7.83,2.5 -11.72,4.12q-11.86,4.94 -20.59,14.64c-14.25,15.81 -20.07,36.4 -15.05,57.16q4.99,20.63 22.86,35.71c10.45,8.81 23.7,13.12 37.26,14.18q1.47,0.11 3.6,2.65c11.68,13.89 24.48,27.72 35.94,41.96a0.43,0.43 0 0 1 -0.21,0.68q-22.51,7.27 -47.37,5.37q-19.4,-1.47 -39.74,-11.22q-18.27,-8.75 -30.59,-21.28q-18.66,-18.98 -28.02,-43.57q-10.8,-28.4 -4.93,-58.67c1.59,-8.17 4.03,-17 7.42,-24.61q5.08,-11.38 11.61,-20.64q25.41,-36.03 68.45,-46.13q32.42,-7.61 64.23,3.92q25.31,9.17 43.2,27.31c16.85,17.09 28.91,40.01 32.24,64z" fill="#0d161f"/>
<path id="svg_3" d="m499.47,180.61c6.45,13.53 16.44,21.75 31.96,22q11.94,0.19 22.17,-5.36q2.21,-1.2 3.93,0.69q12.56,13.78 24.89,28.47q1.21,1.44 1.44,3.13a0.95,0.95 0 0 1 -0.36,0.89c-1.62,1.23 -3.33,2.71 -5.03,3.69q-29.37,17.01 -62.47,11.31c-20.61,-3.55 -39.05,-15.24 -51.47,-32.51q-6.4,-8.89 -9.91,-17.08c-2.62,-6.12 -4.73,-13.3 -5.41,-20.08q-3.96,-39.88 22.94,-67.74c9.48,-9.81 21.15,-16.67 34.39,-19.49c16.54,-3.53 34.64,-1.83 48.77,7.1q13.92,8.79 21.13,20.4q11.07,17.84 10.48,38.92c-0.02,0.94 -0.21,1.81 -0.85,2.54q-7.73,8.77 -18.71,20.16c-1.28,1.32 -2.61,2.26 -4.51,2.23q-24.45,-0.37 -51.64,-0.41q-5.03,0 -10.84,-0.22a0.96,0.95 -11.7 0 0 -0.9,1.36zm1.12,-37.17q-0.55,1.19 -0.63,2.34q-0.08,1.01 0.94,1.03q19.01,0.25 36.98,0.01q0.5,0 0.94,-0.22q0.57,-0.28 0.44,-0.9q-2.34,-11.6 -14.11,-15.25q-3.59,-1.11 -6.44,-0.57q-13.07,2.5 -18.12,13.56z" fill="#0d161f"/>
<path id="svg_4" d="m312.3,100.22a0.5,0.49 -22.1 0 0 0.84,0.35q2.76,-2.64 5.82,-4.31q8.45,-4.62 16.71,-6.57c15.81,-3.72 33.58,-3.2 48.2,3.95q24.49,11.98 35.05,35.76c4.66,10.5 5.44,22.96 5.5,35.35q0.21,49.99 -0.12,88q-0.03,3.06 -0.08,6.16a1.32,1.32 0 0 1 -1.33,1.3q-20.22,-0.18 -40.18,-0.23q-3.64,-0.01 -8.13,-0.44a1.06,1.05 -87.3 0 1 -0.95,-1.05q0.02,-45.49 -0.22,-92.99c-0.03,-6.25 -1.21,-13.88 -5.05,-18.95q-5.33,-7.03 -12.32,-10.18c-10.99,-4.93 -24.52,-1.84 -33.13,6.37q-10.01,9.53 -10.07,23.76q-0.11,25.46 -0.1,48.98c0,3.52 -0.06,8.31 -1.1,11.68c-4.37,14.04 -17.31,19.5 -31.04,16.77c-8.22,-1.64 -15.07,-7.75 -17.62,-15.62q-1.45,-4.49 -1.42,-10.2q0.3,-64.69 0.1,-129.86a0.47,0.47 0 0 1 0.47,-0.47l48.46,-0.35a1.56,1.55 89.4 0 1 1.56,1.54l0.15,11.25z" fill="#0d161f"/>
<path id="svg_5" d="m265.63,344.43a2.02,2.01 76.7 0 0 -1.85,-1.15l-17.03,0.24a2.25,2.22 9.3 0 0 -2.06,1.46l-2.86,7.84a2.47,2.46 -79.1 0 1 -2.38,1.62l-6.23,-0.19q-1.19,-0.04 -0.88,-1.19q1.38,-5.23 2.81,-8.7c3.41,-8.3 6.48,-16.83 10.12,-25.35q2.96,-6.93 5.21,-14.24c0.46,-1.52 1.69,-2.64 3.37,-2.63c2.02,0 4.68,-0.78 5.7,1.58q7.68,17.74 18.16,44.75q0.96,2.46 1.48,5a0.67,0.66 84.3 0 1 -0.65,0.8l-6.05,-0.02q-2.16,-0.01 -3.1,-1.96l-3.76,-7.86zm-16.73,-10.31a0.34,0.34 0 0 0 0.32,0.47l12.85,-0.36a0.34,0.34 0 0 0 0.3,-0.48l-6.84,-14.7a0.34,0.34 0 0 0 -0.62,0.02l-6.01,15.05z" fill="#0d161f"/>
<rect id="svg_6" rx="2.17" height="52.28" width="9.84" y="302.19" x="345.67" fill="#0d161f"/>
<path id="svg_7" d="m303.07,338.46l-0.15,14.42q-0.01,1.55 -1.56,1.52l-5.84,-0.12q-1.79,-0.04 -1.81,-1.83c-0.24,-15.33 -0.25,-30.89 -0.27,-47.22q-0.01,-2.99 2.55,-3.06q12.47,-0.33 20.15,0.8q8.61,1.25 12.86,9.17c2.95,5.49 2.53,13.5 -1.5,18.65c-5.57,7.14 -14.88,6.62 -23.24,6.51a1.17,1.17 0 0 0 -1.19,1.16zm-0.15,-24.81l0.16,12.72a1.72,1.72 0 0 0 1.74,1.7l6.07,-0.08a10.01,7.98 -0.7 0 0 9.91,-8.1l0,-0.2a10.01,7.98 -0.7 0 0 -10.11,-7.86l-6.07,0.08a1.72,1.72 0 0 0 -1.7,1.74z" fill="#0d161f"/>
<rect id="svg_8" rx="3.58" height="7.26" width="79.2" y="322.99" x="107" fill="#0d161f"/>
<rect id="svg_9" rx="3.81" height="7.72" width="79.1" y="322.78" x="417.27" fill="#0d161f"/>
</g>
</svg>
Before Width: | Height: | Size: 4.1 KiB |
Before Width: | Height: | Size: 20 KiB |
@@ -1,157 +0,0 @@
// paper & background
$paper: #ffffff;

// primary
$primaryLight: #eef2f6;
$primaryMain: #2196f3;
$primaryDark: #1e88e5;
$primary200: #90caf9;
$primary800: #1565c0;

// secondary
$secondaryLight: #ede7f6;
$secondaryMain: #673ab7;
$secondaryDark: #5e35b1;
$secondary200: #b39ddb;
$secondary800: #4527a0;

// success Colors
$successLight: #b9f6ca;
$success200: #69f0ae;
$successMain: #00e676;
$successDark: #00c853;

// error
$errorLight: #ef9a9a;
$errorMain: #f44336;
$errorDark: #c62828;

// orange
$orangeLight: #fbe9e7;
$orangeMain: #ffab91;
$orangeDark: #d84315;

// warning
$warningLight: #fff8e1;
$warningMain: #ffe57f;
$warningDark: #ffc107;

// grey
$grey50: #f8fafc;
$grey100: #eef2f6;
$grey200: #e3e8ef;
$grey300: #cdd5df;
$grey500: #697586;
$grey600: #4b5565;
$grey700: #364152;
$grey900: #121926;

// ==============================|| DARK THEME VARIANTS ||============================== //

// paper & background
$darkBackground: #1a223f; // level 3
$darkPaper: #111936; // level 4

// dark 800 & 900
$darkLevel1: #29314f; // level 1
$darkLevel2: #212946; // level 2

// primary dark
$darkPrimaryLight: #eef2f6;
$darkPrimaryMain: #2196f3;
$darkPrimaryDark: #1e88e5;
$darkPrimary200: #90caf9;
$darkPrimary800: #1565c0;

// secondary dark
$darkSecondaryLight: #d1c4e9;
$darkSecondaryMain: #7c4dff;
$darkSecondaryDark: #651fff;
$darkSecondary200: #b39ddb;
$darkSecondary800: #6200ea;

// text variants
$darkTextTitle: #d7dcec;
$darkTextPrimary: #bdc8f0;
$darkTextSecondary: #8492c4;

// ==============================|| JAVASCRIPT ||============================== //

:export {
  // paper & background
  paper: $paper;

  // primary
  primaryLight: $primaryLight;
  primary200: $primary200;
  primaryMain: $primaryMain;
  primaryDark: $primaryDark;
  primary800: $primary800;

  // secondary
  secondaryLight: $secondaryLight;
  secondary200: $secondary200;
  secondaryMain: $secondaryMain;
  secondaryDark: $secondaryDark;
  secondary800: $secondary800;

  // success
  successLight: $successLight;
  success200: $success200;
  successMain: $successMain;
  successDark: $successDark;

  // error
  errorLight: $errorLight;
  errorMain: $errorMain;
  errorDark: $errorDark;

  // orange
  orangeLight: $orangeLight;
  orangeMain: $orangeMain;
  orangeDark: $orangeDark;

  // warning
  warningLight: $warningLight;
  warningMain: $warningMain;
  warningDark: $warningDark;

  // grey
  grey50: $grey50;
  grey100: $grey100;
  grey200: $grey200;
  grey300: $grey300;
  grey500: $grey500;
  grey600: $grey600;
  grey700: $grey700;
  grey900: $grey900;

  // ==============================|| DARK THEME VARIANTS ||============================== //

  // paper & background
  darkPaper: $darkPaper;
  darkBackground: $darkBackground;

  // dark 800 & 900
  darkLevel1: $darkLevel1;
  darkLevel2: $darkLevel2;

  // text variants
  darkTextTitle: $darkTextTitle;
  darkTextPrimary: $darkTextPrimary;
  darkTextSecondary: $darkTextSecondary;

  // primary dark
  darkPrimaryLight: $darkPrimaryLight;
  darkPrimaryMain: $darkPrimaryMain;
  darkPrimaryDark: $darkPrimaryDark;
  darkPrimary200: $darkPrimary200;
  darkPrimary800: $darkPrimary800;

  // secondary dark
  darkSecondaryLight: $darkSecondaryLight;
  darkSecondaryMain: $darkSecondaryMain;
  darkSecondaryDark: $darkSecondaryDark;
  darkSecondary200: $darkSecondary200;
  darkSecondary800: $darkSecondary800;
}
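The `:export` block above is the ICSS bridge that makes these SCSS values readable from JavaScript. A minimal consumption sketch, assuming a webpack/css-loader setup (the import path and palette shape are assumptions, not code from this diff):

// reading the SCSS-exported palette from JS; with css-loader,
// importing the module yields the exported keys as a plain object
import colors from './themes-vars.module.scss'; // path assumed

const lightPalette = {
  primary: { light: colors.primaryLight, main: colors.primaryMain, dark: colors.primaryDark },
  secondary: { light: colors.secondaryLight, main: colors.secondaryMain, dark: colors.secondaryDark }
};

export default lightPalette;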
@@ -1,128 +0,0 @@
// color variants
@import 'themes-vars.module.scss';

// third-party
@import '~react-perfect-scrollbar/dist/css/styles.css';

// ==============================|| LIGHT BOX ||============================== //
.fullscreen .react-images__blanket {
  z-index: 1200;
}

// ==============================|| APEXCHART ||============================== //

.apexcharts-legend-series .apexcharts-legend-marker {
  margin-right: 8px;
}

// ==============================|| PERFECT SCROLLBAR ||============================== //

.scrollbar-container {
  .ps__rail-y {
    &:hover > .ps__thumb-y,
    &:focus > .ps__thumb-y,
    &.ps--clicking .ps__thumb-y {
      background-color: $grey500;
      width: 5px;
    }
  }
  .ps__thumb-y {
    background-color: $grey500;
    border-radius: 6px;
    width: 5px;
    right: 0;
  }
}

.scrollbar-container.ps,
.scrollbar-container > .ps {
  &.ps--active-y > .ps__rail-y {
    width: 5px;
    background-color: transparent !important;
    z-index: 999;
    &:hover,
    &.ps--clicking {
      width: 5px;
      background-color: transparent;
    }
  }
  &.ps--scrolling-y > .ps__rail-y,
  &.ps--scrolling-x > .ps__rail-x {
    opacity: 0.4;
    background-color: transparent;
  }
}

// ==============================|| ANIMATION KEYFRAMES ||============================== //

@keyframes wings {
  50% {
    transform: translateY(-40px);
  }
  100% {
    transform: translateY(0px);
  }
}

@keyframes blink {
  50% {
    opacity: 0;
  }
  100% {
    opacity: 1;
  }
}

@keyframes bounce {
  0%,
  20%,
  53%,
  to {
    animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1);
    transform: translateZ(0);
  }
  40%,
  43% {
    animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);
    transform: translate3d(0, -5px, 0);
  }
  70% {
    animation-timing-function: cubic-bezier(0.755, 0.05, 0.855, 0.06);
    transform: translate3d(0, -7px, 0);
  }
  80% {
    animation-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1);
    transform: translateZ(0);
  }
  90% {
    transform: translate3d(0, -2px, 0);
  }
}

@keyframes slideY {
  0%,
  50%,
  100% {
    transform: translateY(0px);
  }
  25% {
    transform: translateY(-10px);
  }
  75% {
    transform: translateY(10px);
  }
}

@keyframes slideX {
  0%,
  50%,
  100% {
    transform: translateX(0px);
  }
  25% {
    transform: translateX(-10px);
  }
  75% {
    transform: translateX(10px);
  }
}
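These keyframes are applied by name from component styles; a tiny assumed example of wiring one up from JSX (the element, asset path, and duration are illustrative only):

// any element can opt into the `wings` keyframes above by name
const wingsStyle = { animation: 'wings 3s ease-in-out infinite' };

const FloatingLogo = () => <img src="/logo.svg" alt="logo" style={wingsStyle} />; // asset path hypothetical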
@@ -1,29 +0,0 @@
const config = {
  // basename: set only at build time. Don't add a trailing '/' to BASENAME (it breaks breadcrumbs),
  // and don't use a bare '/': use an empty string ('') instead,
  // like '/berry-material-react/react/default'
  basename: '/',
  defaultPath: '/panel/dashboard',
  fontFamily: `'Roboto', Helvetica, Arial, sans-serif`,
  borderRadius: 12,
  siteInfo: {
    chat_link: '',
    display_in_currency: true,
    email_verification: false,
    footer_html: '',
    github_client_id: '',
    github_oauth: false,
    logo: '',
    quota_per_unit: 500000,
    server_address: '',
    start_time: 0,
    system_name: 'One API',
    top_up_link: '',
    turnstile_check: false,
    turnstile_site_key: '',
    version: '',
    wechat_login: false,
    wechat_qrcode: ''
  }
};

export default config;
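A hedged sketch of how such a config object is commonly consumed, for example feeding `basename` to the router at the app root; the component wiring here is an assumption, not code from this diff:

import { BrowserRouter } from 'react-router-dom';
import App from 'App'; // hypothetical app entry component
import config from 'config'; // path assumed

// an empty basename ('') means the app is served from the site root,
// per the comment on `basename` above
const Root = () => (
  <BrowserRouter basename={config.basename}>
    <App />
  </BrowserRouter>
);

export default Root;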
@@ -1,146 +0,0 @@
export const CHANNEL_OPTIONS = {
  1: {
    key: 1,
    text: 'OpenAI',
    value: 1,
    color: 'primary'
  },
  14: {
    key: 14,
    text: 'Anthropic Claude',
    value: 14,
    color: 'info'
  },
  3: {
    key: 3,
    text: 'Azure OpenAI',
    value: 3,
    color: 'orange'
  },
  11: {
    key: 11,
    text: 'Google PaLM2',
    value: 11,
    color: 'orange'
  },
  24: {
    key: 24,
    text: 'Google Gemini',
    value: 24,
    color: 'orange'
  },
  15: {
    key: 15,
    text: '百度文心千帆', // Baidu Wenxin Qianfan (ERNIE)
    value: 15,
    color: 'default'
  },
  17: {
    key: 17,
    text: '阿里通义千问', // Alibaba Tongyi Qianwen (Qwen)
    value: 17,
    color: 'default'
  },
  18: {
    key: 18,
    text: '讯飞星火认知', // iFlytek Spark
    value: 18,
    color: 'default'
  },
  16: {
    key: 16,
    text: '智谱 ChatGLM', // Zhipu ChatGLM
    value: 16,
    color: 'default'
  },
  19: {
    key: 19,
    text: '360 智脑', // 360 Zhinao
    value: 19,
    color: 'default'
  },
  23: {
    key: 23,
    text: '腾讯混元', // Tencent Hunyuan
    value: 23,
    color: 'default'
  },
  8: {
    key: 8,
    text: '自定义渠道', // custom channel
    value: 8,
    color: 'primary'
  },
  22: {
    key: 22,
    text: '知识库：FastGPT', // knowledge base: FastGPT
    value: 22,
    color: 'default'
  },
  21: {
    key: 21,
    text: '知识库：AI Proxy', // knowledge base: AI Proxy
    value: 21,
    color: 'purple'
  },
  20: {
    key: 20,
    text: '代理：OpenRouter', // proxy: OpenRouter
    value: 20,
    color: 'primary'
  },
  2: {
    key: 2,
    text: '代理：API2D', // proxy: API2D
    value: 2,
    color: 'primary'
  },
  5: {
    key: 5,
    text: '代理：OpenAI-SB', // proxy: OpenAI-SB
    value: 5,
    color: 'primary'
  },
  7: {
    key: 7,
    text: '代理：OhMyGPT', // proxy: OhMyGPT
    value: 7,
    color: 'primary'
  },
  10: {
    key: 10,
    text: '代理：AI Proxy', // proxy: AI Proxy
    value: 10,
    color: 'primary'
  },
  4: {
    key: 4,
    text: '代理：CloseAI', // proxy: CloseAI
    value: 4,
    color: 'primary'
  },
  6: {
    key: 6,
    text: '代理：OpenAI Max', // proxy: OpenAI Max
    value: 6,
    color: 'primary'
  },
  9: {
    key: 9,
    text: '代理：AI.LS', // proxy: AI.LS
    value: 9,
    color: 'primary'
  },
  12: {
    key: 12,
    text: '代理：API2GPT', // proxy: API2GPT
    value: 12,
    color: 'primary'
  },
  13: {
    key: 13,
    text: '代理：AIGC2D', // proxy: AIGC2D
    value: 13,
    color: 'primary'
  }
};
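Because CHANNEL_OPTIONS is keyed by channel id, turning it into UI select options is a single pass over its values; a short sketch (the { value, label } shape and the sort are assumptions about the consuming component):

import { CHANNEL_OPTIONS } from 'constants/ChannelConstants'; // path assumed from the barrel export below

// flatten the id-keyed table into option pairs, sorted for a stable display order
const channelItems = Object.values(CHANNEL_OPTIONS)
  .sort((a, b) => a.key - b.key)
  .map(({ value, text }) => ({ value, label: text }));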
@@ -1 +0,0 @@
export const ITEMS_PER_PAGE = 10; // this value must stay in sync with the one defined in the backend!
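`ITEMS_PER_PAGE` drives client-side paging; a sketch of the slice it implies (the helper is assumed, not from this repository):

import { ITEMS_PER_PAGE } from 'constants/CommonConstants'; // path assumed

// page is 0-based; the backend must use the same page size for offsets to line up
const pageSlice = (items, page) =>
  items.slice(page * ITEMS_PER_PAGE, (page + 1) * ITEMS_PER_PAGE);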
@@ -1,27 +0,0 @@
export const snackbarConstants = {
  Common: {
    ERROR: {
      variant: 'error',
      autoHideDuration: 5000
    },
    WARNING: {
      variant: 'warning',
      autoHideDuration: 10000
    },
    SUCCESS: {
      variant: 'success',
      autoHideDuration: 1500
    },
    INFO: {
      variant: 'info',
      autoHideDuration: 3000
    },
    NOTICE: {
      variant: 'info',
      autoHideDuration: 20000
    }
  },
  Mobile: {
    anchorOrigin: { vertical: 'bottom', horizontal: 'center' }
  }
};
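The `variant`/`autoHideDuration`/`anchorOrigin` shapes match notistack's enqueueSnackbar options, so a plausible usage (assumed; the diff itself doesn't show the call site) looks like:

import { useSnackbar } from 'notistack';
import { snackbarConstants } from 'constants/SnackbarConstants'; // path assumed

const useSaveNotice = () => {
  const { enqueueSnackbar } = useSnackbar();
  // spread the preset so variant and auto-hide timing stay consistent app-wide
  return () => enqueueSnackbar('Saved', { ...snackbarConstants.Common.SUCCESS });
};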
@@ -1,3 +0,0 @@
export * from './SnackbarConstants';
export * from './CommonConstants';
export * from './ChannelConstants';
@@ -1,70 +0,0 @@
import { useEffect, useCallback, createContext } from "react";
import { API } from "utils/api";
import { showNotice, showError } from "utils/common";
import { SET_SITE_INFO } from "store/actions";
import { useDispatch } from "react-redux";

export const LoadStatusContext = createContext();

// eslint-disable-next-line
const StatusProvider = ({ children }) => {
  const dispatch = useDispatch();

  const loadStatus = useCallback(async () => {
    const res = await API.get("/api/status");
    const { success, data } = res.data;
    let system_name = "";
    if (success) {
      if (!data.chat_link) {
        delete data.chat_link;
      }
      localStorage.setItem("siteInfo", JSON.stringify(data));
      localStorage.setItem("quota_per_unit", data.quota_per_unit);
      localStorage.setItem("display_in_currency", data.display_in_currency);
      dispatch({ type: SET_SITE_INFO, payload: data });
      if (
        data.version !== process.env.REACT_APP_VERSION &&
        data.version !== "v0.0.0" &&
        data.version !== "" &&
        process.env.REACT_APP_VERSION !== ""
      ) {
        // "New version available: <version>, please refresh the page with Shift + F5"
        showNotice(
          `新版本可用：${data.version}，请使用快捷键 Shift + F5 刷新页面`
        );
      }
      if (data.system_name) {
        system_name = data.system_name;
      }
    } else {
      // fall back to the cached site info when the server is unreachable
      const backupSiteInfo = localStorage.getItem("siteInfo");
      if (backupSiteInfo) {
        const data = JSON.parse(backupSiteInfo);
        if (data.system_name) {
          system_name = data.system_name;
        }
        dispatch({
          type: SET_SITE_INFO,
          payload: data,
        });
      }
      // "Unable to connect to the server!"
      showError("无法正常连接至服务器！");
    }

    if (system_name) {
      document.title = system_name;
    }
  }, [dispatch]);

  useEffect(() => {
    loadStatus();
  }, [loadStatus]);

  return (
    <LoadStatusContext.Provider value={loadStatus}>
      {children}
    </LoadStatusContext.Provider>
  );
};

export default StatusProvider;
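A sketch of composing the provider at the app root and forcing a status reload from a child via LoadStatusContext (the file path and components are illustrative, not from this diff):

import { useContext } from 'react';
import StatusProvider, { LoadStatusContext } from 'contexts/StatusContext'; // path assumed

const RefreshStatusButton = () => {
  const loadStatus = useContext(LoadStatusContext);
  return <button onClick={() => loadStatus()}>Refresh status</button>;
};

const Root = () => (
  <StatusProvider>
    <RefreshStatusButton />
  </StatusProvider>
);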
@@ -1,29 +0,0 @@
// contexts/User/index.jsx
import React, { useEffect, useCallback, createContext, useState } from 'react';
import { LOGIN } from 'store/actions';
import { useDispatch } from 'react-redux';

export const UserContext = createContext();

// eslint-disable-next-line
const UserProvider = ({ children }) => {
  const dispatch = useDispatch();
  const [isUserLoaded, setIsUserLoaded] = useState(false);

  const loadUser = useCallback(() => {
    // restore the logged-in user from localStorage, if present
    const user = localStorage.getItem('user');
    if (user) {
      const data = JSON.parse(user);
      dispatch({ type: LOGIN, payload: data });
    }
    setIsUserLoaded(true);
  }, [dispatch]);

  useEffect(() => {
    loadUser();
  }, [loadUser]);

  return <UserContext.Provider value={{ loadUser, isUserLoaded }}>{children}</UserContext.Provider>;
};

export default UserProvider;
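`isUserLoaded` lets routes wait for the localStorage restore before deciding whether anyone is logged in; a hedged consumption sketch (the gate component is an assumption):

import { useContext } from 'react';
import { UserContext } from 'contexts/User'; // path taken from the file comment above

const UserGate = ({ children }) => {
  const { isUserLoaded } = useContext(UserContext);
  // render nothing until the stored user (if any) has been restored
  return isUserLoaded ? children : null;
};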
@@ -1,13 +0,0 @@
import { useEffect } from 'react';
import { isAdmin } from 'utils/common';
import { useNavigate } from 'react-router-dom';

const useAuth = () => {
  // useNavigate is a hook, so it must be called inside the hook body,
  // not at module scope; the redirect runs as an effect, not during render
  const navigate = useNavigate();
  const userIsAdmin = isAdmin();

  useEffect(() => {
    if (!userIsAdmin) {
      navigate('/panel/404');
    }
  }, [userIsAdmin, navigate]);
};

export default useAuth;
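Usage is then a one-line guard inside any admin-only page (the page component and import path are hypothetical):

import useAuth from 'hooks/useAuth'; // path assumed

const AdminOnlyPage = () => {
  useAuth(); // redirects non-admins to /panel/404
  return <div>admin content</div>;
};

export default AdminOnlyPage;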