Compare commits


89 Commits
dev ... v0.6.2

Author SHA1 Message Date
JustSong
b204f6d82b docs: update README 2024-03-15 00:55:28 +08:00
JustSong
752639560f feat: able to use separated database for table logs 2024-03-15 00:30:15 +08:00
JustSong
996f4d99dd ci: fix ci 2024-03-14 23:53:25 +08:00
warjiang
ebfee3b46c feat: add support for private registry in docker-compose.yml (#1103) 2024-03-14 23:47:46 +08:00
dependabot[bot]
3e2e805d61 chore(deps): bump google.golang.org/protobuf from 1.30.0 to 1.33.0 (#1145)
Bumps google.golang.org/protobuf from 1.30.0 to 1.33.0.

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-14 23:46:17 +08:00
E.da
3edf7247c4 fix: fix theme berry copy (#1148)
Adjusts the footer `label` wording in the berry theme
2024-03-14 23:45:50 +08:00
afafw
0926b6206b chore: update client name (#934) 2024-03-14 23:44:46 +08:00
JustSong
7cd57f3125 chore: update ratio for baidu embedding 2024-03-14 23:36:10 +08:00
Jguobao
66efabd5ae fix: fix baidu url check (#1143)
Adds three more Baidu embedding models: "bge-large-zh", "bge-large-en", "tao-8k"
2024-03-14 23:31:07 +08:00
JustSong
8ede66a896 fix: fix ci 2024-03-14 23:27:47 +08:00
JustSong
b169173860 fix: force set Accept header for ali stream request (close #1151) 2024-03-14 23:20:38 +08:00
JustSong
f33555ae78 fix: update max token for test (close #1154) 2024-03-14 23:17:19 +08:00
JustSong
c28ec10795 fix: fix cors for dashboard api 2024-03-14 23:14:39 +08:00
JustSong
e3767cbb07 fix: fix haiku model name (close #1149) 2024-03-14 23:13:05 +08:00
JustSong
be9eb59fbb feat: support lingyiwanwu 2024-03-14 23:11:36 +08:00
JustSong
89e111ac69 ci: fix ci condition 2024-03-14 01:17:19 +08:00
JustSong
2dcef85285 feat: support ollama now (close #870) 2024-03-14 01:02:47 +08:00
JustSong
79d0cd378a fix: fix baidu system prompt (close #1079) 2024-03-13 22:56:54 +08:00
JustSong
e99150bdb9 fix: make quota int64 2024-03-13 20:00:51 +08:00
JustSong
a72e5fcc9e fix: when cached quota is too low, force refresh it 2024-03-13 19:38:44 +08:00
JustSong
0710f8cd66 fix: when cached quota is too low, force refresh it 2024-03-13 19:26:24 +08:00
JustSong
49cad7d4a5 feat: update func ShouldDisableChannel for claude 2024-03-13 19:11:30 +08:00
JustSong
a90161cf00 chore: drop idx_channels_key on start 2024-03-11 02:24:58 +08:00
sparanoid
a45fc7d736 fix: model name typo (#1109) 2024-03-11 00:44:49 +08:00
JustSong
45940dcb12 chore: add more info for panic fix 2024-03-10 23:59:35 +08:00
JustSong
969042b001 chore: only use one log file (close #1116) 2024-03-10 23:44:48 +08:00
JustSong
7e7369dbc4 fix: only disable channel when allowed 2024-03-10 23:41:16 +08:00
JustSong
e54e647170 chore: remove useless code 2024-03-10 23:36:29 +08:00
JustSong
358920c858 fix: remove index idx_channels_key (close #644) 2024-03-10 23:27:22 +08:00
JustSong
1ea598c773 feat: check claude's error response 2024-03-10 20:39:55 +08:00
JustSong
796be42487 feat: update ratio config if missing 2024-03-10 19:29:42 +08:00
JustSong
5b50eb94e5 feat: able to send alert message via message pusher (close #993) 2024-03-10 19:16:06 +08:00
JustSong
71c61365eb feat: able to only test disabled channels (#1090) 2024-03-10 18:34:57 +08:00
JustSong
b09f979b80 fix: add missing turnstile setup (close #1015) 2024-03-10 18:15:24 +08:00
JustSong
12440874b0 feat: able to disable channel by success rate 2024-03-10 17:57:47 +08:00
JustSong
6ebc99460e fix: add user to blacklist when it's banned or deleted, and make deletion soft (close #473, close #791) 2024-03-10 15:56:19 +08:00
JustSong
27ad8bfb98 feat: able to search channel type now 2024-03-10 15:00:33 +08:00
JustSong
8388aa537f chore: able to search channel now 2024-03-10 14:59:57 +08:00
JustSong
2346bf70af fix: check response type when expect stream response 2024-03-10 14:59:40 +08:00
JustSong
f05b403ca5 feat: use real system prompt now (close #1079) 2024-03-10 14:32:30 +08:00
JustSong
b33616df44 feat: support groq now (close #1087) 2024-03-10 14:09:44 +08:00
JustSong
cf16f44970 feat: load channel models from server 2024-03-09 02:28:23 +08:00
JustSong
bf2e26a48f feat: support claude-3 (close #1080, close #1094) 2024-03-09 01:12:47 +08:00
momomobinx
4fb22ad4ce feat: support third part models of baidu (#1046)
Supports calling third-party large models hosted on Baidu's Qianfan platform
2024-03-03 23:50:28 +08:00
JustSong
95cfb8e8c9 fix: using the first available model if default model is not found (close #1021) 2024-03-03 22:58:41 +08:00
JustSong
c6ace985c2 fix: set missing ali parameters (close #1028) 2024-03-03 22:51:01 +08:00
JustSong
10a926b8f3 feat: only use the top priority when first retry (#1048) 2024-03-03 22:16:34 +08:00
JustSong
2df877a352 feat: switch priority when retry (close #1048) 2024-03-03 22:14:07 +08:00
JustSong
9d8967f7d3 feat: support Mistral's models now (close #1051) 2024-03-03 21:46:45 +08:00
JustSong
b35f3523d3 feat: add gemini model alias (close #1064) 2024-03-03 21:03:04 +08:00
JustSong
82e916b5ff fix: fix azure test (close #1069) 2024-03-03 20:51:28 +08:00
JustSong
de18d6fe16 refactor: refactor image relay (close #1068) 2024-03-03 19:30:11 +08:00
JustSong
1d0b7fb5ae feat: support chatglm-4 (close #1045, close #952, close #952, close #943) 2024-03-02 03:05:25 +08:00
JustSong
f9490bb72e fix: able to use updated default ratio 2024-03-02 01:32:04 +08:00
JustSong
76467285e8 docs: update readme 2024-03-02 01:25:21 +08:00
JustSong
df1fd9aa81 feat: support minimax's models now (close #354) 2024-03-02 01:24:28 +08:00
JustSong
614c2e0442 feat: support baichuan's models now (close #1057) 2024-03-02 00:55:48 +08:00
JustSong
eac6a0b9aa fix: fix version is blank 2024-03-02 00:03:29 +08:00
JustSong
b747cdbc6f fix: fix getAndValidateTextRequest failed: unexpected end of JSON input (close #1043) 2024-02-26 22:52:16 +08:00
JustSong
6b27d6659a fix: add role for ChatCompletionsStreamResponseChoice.Delta 2024-02-25 19:49:22 +08:00
JustSong
dc5b781191 fix: fix stream response id 2024-02-25 19:47:59 +08:00
JustSong
c880b4a9a3 fix: fix missing index in ChatCompletionsStreamResponseChoice (#1037) 2024-02-25 19:17:37 +08:00
JustSong
565ea58e68 feat: built in retry supported (close #1036, close #770) 2024-02-25 19:01:49 +08:00
JustSong
f141a37a9e fix: fix "error update user quota cache: Error 1040: Too many connections" 2024-02-25 16:58:14 +08:00
JustSong
5b78886ad3 fix: fix i18n 2024-02-25 16:53:46 +08:00
JustSong
87c7c4f0e6 fix: rm history build before building 2024-02-25 02:07:34 +08:00
JustSong
4c4a873890 fix: add an ending line for THEMES 2024-02-25 01:59:40 +08:00
JustSong
0664bdfda1 fix: fix build.sh (close #1026) 2024-02-25 01:53:27 +08:00
JustSong
32387d9c20 fix: fix version is blank 2024-02-21 22:21:01 +08:00
JustSong
bd888f2eb7 fix: fix prompt token is zero (close #1023) 2024-02-21 22:19:42 +08:00
JustSong
cece77e533 fix: fix model list 2024-02-19 22:20:18 +08:00
JustSong
2a5468e23c refactor: remove useless button (close #1014) 2024-02-18 22:21:37 +08:00
JustSong
d0e415893b fix: fix SparkDesk model name 2024-02-18 17:16:11 +08:00
JustSong
6cf5ce9a7a fix: fix SparkDesk model name 2024-02-18 17:11:16 +08:00
JustSong
f598b9df87 feat: add new SparkDesk models 2024-02-18 17:02:36 +08:00
JustSong
532c50d212 fix: fix channel table page copy 2024-02-18 16:19:14 +08:00
JustSong
2acc2f5017 feat: support moonshot now (close #804) 2024-02-18 16:17:19 +08:00
JustSong
604ac56305 fix: set seed parameter for qwen (close #1005) 2024-02-18 15:01:09 +08:00
JustSong
9383b638a6 feat: add ChatPro & ChatStd for tencent (#1010) 2024-02-18 14:40:01 +08:00
JustSong
28d512a675 refactor: delete useless code 2024-02-18 02:23:31 +08:00
JustSong
de9a58ca0b refactor: use config field to save config 2024-02-18 02:22:50 +08:00
JustSong
1aa374ccfb refactor: use adaptor to do relay & test 2024-02-18 00:15:31 +08:00
Laisky.Cai
d548a01c59 feat: Handle errors, validate model names, and calculate quota usage (#978)
- Improved error handling in various modules for better stability and responsiveness.
- Optimized code in several files for improved efficiency and readability.
- Enhanced user experience by providing more detailed error responses in the controller.
- Strengthened security by ignoring sensitive files in `.gitignore`.
2024-02-12 21:35:40 +08:00
JustSong
2cd1a78203 chore: update module name 2024-01-28 19:38:58 +08:00
JustSong
b9d3cb0c45 refactor: split RelayTextHelper function 2024-01-28 19:14:46 +08:00
JustSong
ea407f0054 feat: able to set completion ration now (close #968) 2024-01-28 16:45:54 +08:00
Benny
26e2e646cb feat: sync models with OpenAI (#971)
* add new 0125 chat models and embedding-3 models

* refine the step of manually deploying

* add gpt-4-turbo-preview
2024-01-28 16:09:21 +08:00
yongman
4f214c48c6 fix: fix primary chat button (#951)
Signed-off-by: yongman <yming0221@gmail.com>
2024-01-21 23:27:34 +08:00
JustSong
2d760d4a01 refactor: refactor relay part (#957)
* refactor: refactor relay part

* refactor: refactor config part
2024-01-21 23:21:42 +08:00
167 changed files with 4063 additions and 2592 deletions

View File

@@ -20,6 +20,13 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v3
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 0
+          fi
       - name: Save version info
         run: |
           git describe --tags > VERSION

View File

@@ -20,6 +20,13 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v3
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 0
+          fi
       - name: Save version info
         run: |
           git describe --tags > VERSION

View File

@@ -21,6 +21,13 @@ jobs:
      - name: Check out the repo
        uses: actions/checkout@v3
+     - name: Check repository URL
+       run: |
+         REPO_URL=$(git config --get remote.origin.url)
+         if [[ $REPO_URL == *"pro" ]]; then
+           exit 0
+         fi
      - name: Save version info
        run: |
          git describe --tags > VERSION

View File

@@ -20,10 +20,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 0
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -38,7 +44,7 @@ jobs:
       - name: Build Backend (amd64)
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
       - name: Build Backend (arm64)
         run: |

View File

@@ -20,10 +20,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 0
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -38,7 +44,7 @@ jobs:
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-X 'one-api/common.Version=$(git describe --tags)'" -o one-api-macos
+          go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api-macos
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')

View File

@@ -23,10 +23,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 0
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -41,7 +47,7 @@ jobs:
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)'" -o one-api.exe
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api.exe
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')

3
.gitignore vendored
View File

@@ -6,4 +6,5 @@ upload
 build
 *.db-journal
 logs
 data
+/web/node_modules

View File

@@ -23,7 +23,7 @@ ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
 COPY --from=builder /web/build ./web/build
-RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
+RUN go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api

 FROM alpine
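
A recurring change in the Dockerfile and in every release workflow above is the import path handed to the Go linker's -X flag. -X writes a value into a package-level string variable identified by its full import path at link time, so once the module was renamed from one-api to github.com/songquanpeng/one-api, the old path no longer matched anything and every build command had to be updated. A minimal sketch of the variable being targeted (the default value below is an assumption; it is not shown in this compare):

package common

// Version is overwritten at link time, e.g.:
//
//	go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=v0.6.2'"
//
// If the path given to -X does not match the variable's real import path,
// the flag is silently ignored and this default is kept.
var Version = "v0.0.0" // assumed placeholder default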

View File

@@ -134,12 +134,12 @@ The initial account username is `root` and password is `123456`.
 git clone https://github.com/songquanpeng/one-api.git

 # Build the frontend
-cd one-api/web
+cd one-api/web/default
 npm install
 npm run build

 # Build the backend
-cd ..
+cd ../..
 go mod download
 go build -ldflags "-s -w" -o one-api
 ```

View File

@@ -135,12 +135,12 @@ sudo service nginx restart
 git clone https://github.com/songquanpeng/one-api.git

 # フロントエンドのビルド
-cd one-api/web
+cd one-api/web/default
 npm install
 npm run build

 # バックエンドのビルド
-cd ..
+cd ../..
 go mod download
 go build -ldflags "-s -w" -o one-api
 ```

View File

@@ -67,12 +67,20 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用 ✨_
 + [x] [OpenAI ChatGPT 系列模型](https://platform.openai.com/docs/guides/gpt/chat-completions-api)(支持 [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
 + [x] [Anthropic Claude 系列模型](https://anthropic.com)
 + [x] [Google PaLM2/Gemini 系列模型](https://developers.generativeai.google)
++ [x] [Mistral 系列模型](https://mistral.ai/)
 + [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
 + [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
 + [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
 + [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn)
 + [x] [360 智脑](https://ai.360.cn)
 + [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729)
++ [x] [Moonshot AI](https://platform.moonshot.cn/)
++ [x] [百川大模型](https://platform.baichuan-ai.com)
++ [ ] [字节云雀大模型](https://www.volcengine.com/product/ark) (WIP)
++ [x] [MINIMAX](https://api.minimax.chat/)
++ [x] [Groq](https://wow.groq.com/)
++ [x] [Ollama](https://github.com/ollama/ollama)
++ [x] [零一万物](https://platform.lingyiwanwu.com/)
 2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
 3. 支持通过**负载均衡**的方式访问多个渠道。
 4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
@@ -100,6 +108,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用 ✨_
     + [GitHub 开放授权](https://github.com/settings/applications/new)。
     + 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
 23. 支持主题切换,设置环境变量 `THEME` 即可,默认为 `default`,欢迎 PR 更多主题,具体参考[此处](./web/README.md)。
+24. 配合 [Message Pusher](https://github.com/songquanpeng/message-pusher) 可将报警信息推送到多种 App 上。

 ## 部署
 ### 基于 Docker 进行部署
@@ -174,12 +183,12 @@ docker-compose ps
 git clone https://github.com/songquanpeng/one-api.git

 # 构建前端
-cd one-api/web
+cd one-api/web/default
 npm install
 npm run build

 # 构建后端
-cd ..
+cd ../..
 go mod download
 go build -ldflags "-s -w" -o one-api
 ````
@@ -369,6 +378,9 @@ graph LR
 16. `SQLITE_BUSY_TIMEOUT`:SQLite 锁等待超时设置,单位为毫秒,默认 `3000`。
 17. `GEMINI_SAFETY_SETTING`:Gemini 的安全设置,默认 `BLOCK_NONE`。
 18. `THEME`:系统的主题设置,默认为 `default`,具体可选值参考[此处](./web/README.md)。
+19. `ENABLE_METRIC`:是否根据请求成功率禁用渠道,默认不开启,可选值为 `true` 和 `false`。
+20. `METRIC_QUEUE_SIZE`:请求成功率统计队列大小,默认为 `10`。
+21. `METRIC_SUCCESS_RATE_THRESHOLD`:请求成功率阈值,默认为 `0.8`。

 ### 命令行参数
 1. `--port <port_number>`: 指定服务器监听的端口号,默认为 `3000`。

29
common/blacklist/main.go Normal file
View File

@@ -0,0 +1,29 @@
+package blacklist
+
+import (
+	"fmt"
+	"sync"
+)
+
+var blackList sync.Map
+
+func init() {
+	blackList = sync.Map{}
+}
+
+func userId2Key(id int) string {
+	return fmt.Sprintf("userid_%d", id)
+}
+
+func BanUser(id int) {
+	blackList.Store(userId2Key(id), true)
+}
+
+func UnbanUser(id int) {
+	blackList.Delete(userId2Key(id))
+}
+
+func IsUserBanned(id int) bool {
+	_, ok := blackList.Load(userId2Key(id))
+	return ok
+}
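
This new blacklist package backs the "add user to blacklist when it's banned or deleted" commit above. A minimal usage sketch (hypothetical caller code, not part of this compare); note that the sync.Map lives in process memory, so each node keeps its own copy:

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/blacklist"
)

func main() {
	blacklist.BanUser(42) // e.g. right after the user is banned or soft-deleted
	fmt.Println(blacklist.IsUserBanned(42)) // true

	blacklist.UnbanUser(42)
	fmt.Println(blacklist.IsUserBanned(42)) // false
}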

View File

@@ -1,7 +1,7 @@
 package config

 import (
-	"one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/env"
 	"os"
 	"strconv"
 	"sync"
@@ -52,6 +52,7 @@ var EmailDomainWhitelist = []string{
 }

 var DebugEnabled = os.Getenv("DEBUG") == "true"
+var DebugSQLEnabled = os.Getenv("DEBUG_SQL") == "true"
 var MemoryCacheEnabled = os.Getenv("MEMORY_CACHE_ENABLED") == "true"

 var LogConsumeEnabled = true
@@ -69,17 +70,20 @@ var WeChatServerAddress = ""
 var WeChatServerToken = ""
 var WeChatAccountQRCodeImageURL = ""

+var MessagePusherAddress = ""
+var MessagePusherToken = ""
+
 var TurnstileSiteKey = ""
 var TurnstileSecretKey = ""

-var QuotaForNewUser = 0
-var QuotaForInviter = 0
-var QuotaForInvitee = 0
+var QuotaForNewUser int64 = 0
+var QuotaForInviter int64 = 0
+var QuotaForInvitee int64 = 0
 var ChannelDisableThreshold = 5.0
 var AutomaticDisableChannelEnabled = false
 var AutomaticEnableChannelEnabled = false
-var QuotaRemindThreshold = 1000
-var PreConsumedQuota = 500
+var QuotaRemindThreshold int64 = 1000
+var PreConsumedQuota int64 = 500
 var ApproximateTokenEnabled = false
 var RetryTimes = 0
@@ -90,16 +94,16 @@ var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
 var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
 var RequestInterval = time.Duration(requestInterval) * time.Second

-var SyncFrequency = helper.GetOrDefaultEnvInt("SYNC_FREQUENCY", 10*60) // unit is second
+var SyncFrequency = env.Int("SYNC_FREQUENCY", 10*60) // unit is second

 var BatchUpdateEnabled = false
-var BatchUpdateInterval = helper.GetOrDefaultEnvInt("BATCH_UPDATE_INTERVAL", 5)
+var BatchUpdateInterval = env.Int("BATCH_UPDATE_INTERVAL", 5)

-var RelayTimeout = helper.GetOrDefaultEnvInt("RELAY_TIMEOUT", 0) // unit is second
+var RelayTimeout = env.Int("RELAY_TIMEOUT", 0) // unit is second

-var GeminiSafetySetting = helper.GetOrDefaultEnvString("GEMINI_SAFETY_SETTING", "BLOCK_NONE")
+var GeminiSafetySetting = env.String("GEMINI_SAFETY_SETTING", "BLOCK_NONE")

-var Theme = helper.GetOrDefaultEnvString("THEME", "default")
+var Theme = env.String("THEME", "default")
 var ValidThemes = map[string]bool{
 	"default": true,
 	"berry":   true,
@@ -108,10 +112,10 @@ var ValidThemes = map[string]bool{
 // All duration's unit is seconds
 // Shouldn't larger then RateLimitKeyExpirationDuration
 var (
-	GlobalApiRateLimitNum            = helper.GetOrDefaultEnvInt("GLOBAL_API_RATE_LIMIT", 180)
+	GlobalApiRateLimitNum            = env.Int("GLOBAL_API_RATE_LIMIT", 180)
 	GlobalApiRateLimitDuration int64 = 3 * 60

-	GlobalWebRateLimitNum            = helper.GetOrDefaultEnvInt("GLOBAL_WEB_RATE_LIMIT", 60)
+	GlobalWebRateLimitNum            = env.Int("GLOBAL_WEB_RATE_LIMIT", 60)
 	GlobalWebRateLimitDuration int64 = 3 * 60

 	UploadRateLimitNum = 10
@@ -125,3 +129,9 @@ var (
 )

 var RateLimitKeyExpirationDuration = 20 * time.Minute
+
+var EnableMetric = env.Bool("ENABLE_METRIC", false)
+var MetricQueueSize = env.Int("METRIC_QUEUE_SIZE", 10)
+var MetricSuccessRateThreshold = env.Float64("METRIC_SUCCESS_RATE_THRESHOLD", 0.8)
+var MetricSuccessChanSize = env.Int("METRIC_SUCCESS_CHAN_SIZE", 1024)
+var MetricFailChanSize = env.Int("METRIC_FAIL_CHAN_SIZE", 128)
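
These five variables configure the "able to disable channel by success rate" feature. The logic that combines them lives in the monitor package, which is not part of this compare, so the following is only a plausible illustration of the arithmetic: with the defaults, a channel is considered unhealthy once fewer than 8 of its last 10 requests succeeded.

package main

import "fmt"

// shouldDisable is a hypothetical restatement of the success-rate check;
// one-api's real implementation lives in its monitor package, not shown here.
func shouldDisable(lastOutcomes []bool, queueSize int, threshold float64) bool {
	if len(lastOutcomes) < queueSize {
		return false // not enough samples yet
	}
	succeeded := 0
	for _, ok := range lastOutcomes[len(lastOutcomes)-queueSize:] {
		if ok {
			succeeded++
		}
	}
	return float64(succeeded)/float64(queueSize) < threshold
}

func main() {
	recent := []bool{true, true, false, true, false, true, false, true, true, false}
	// 6 successes out of the last 10 -> 0.6 < 0.8
	fmt.Println(shouldDisable(recent, 10, 0.8)) // true
}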

View File

@@ -15,6 +15,7 @@ const (
 const (
 	UserStatusEnabled  = 1 // don't use 0, 0 is the default value!
 	UserStatusDisabled = 2 // also don't use 0
+	UserStatusDeleted  = 3
 )

 const (
@@ -38,31 +39,40 @@ const (
 )

 const (
-	ChannelTypeUnknown        = 0
-	ChannelTypeOpenAI         = 1
-	ChannelTypeAPI2D          = 2
-	ChannelTypeAzure          = 3
-	ChannelTypeCloseAI        = 4
-	ChannelTypeOpenAISB       = 5
-	ChannelTypeOpenAIMax      = 6
-	ChannelTypeOhMyGPT        = 7
-	ChannelTypeCustom         = 8
-	ChannelTypeAILS           = 9
-	ChannelTypeAIProxy        = 10
-	ChannelTypePaLM           = 11
-	ChannelTypeAPI2GPT        = 12
-	ChannelTypeAIGC2D         = 13
-	ChannelTypeAnthropic      = 14
-	ChannelTypeBaidu          = 15
-	ChannelTypeZhipu          = 16
-	ChannelTypeAli            = 17
-	ChannelTypeXunfei         = 18
-	ChannelType360            = 19
-	ChannelTypeOpenRouter     = 20
-	ChannelTypeAIProxyLibrary = 21
-	ChannelTypeFastGPT        = 22
-	ChannelTypeTencent        = 23
-	ChannelTypeGemini         = 24
+	ChannelTypeUnknown = iota
+	ChannelTypeOpenAI
+	ChannelTypeAPI2D
+	ChannelTypeAzure
+	ChannelTypeCloseAI
+	ChannelTypeOpenAISB
+	ChannelTypeOpenAIMax
+	ChannelTypeOhMyGPT
+	ChannelTypeCustom
+	ChannelTypeAILS
+	ChannelTypeAIProxy
+	ChannelTypePaLM
+	ChannelTypeAPI2GPT
+	ChannelTypeAIGC2D
+	ChannelTypeAnthropic
+	ChannelTypeBaidu
+	ChannelTypeZhipu
+	ChannelTypeAli
+	ChannelTypeXunfei
+	ChannelType360
+	ChannelTypeOpenRouter
+	ChannelTypeAIProxyLibrary
+	ChannelTypeFastGPT
+	ChannelTypeTencent
+	ChannelTypeGemini
+	ChannelTypeMoonshot
+	ChannelTypeBaichuan
+	ChannelTypeMinimax
+	ChannelTypeMistral
+	ChannelTypeGroq
+	ChannelTypeOllama
+	ChannelTypeLingYiWanWu
+	ChannelTypeDummy
 )

 var ChannelBaseURLs = []string{
@@ -91,4 +101,19 @@ var ChannelBaseURLs = []string{
 	"https://fastgpt.run/api/openapi",           // 22
 	"https://hunyuan.cloud.tencent.com",         // 23
 	"https://generativelanguage.googleapis.com", // 24
+	"https://api.moonshot.cn",                   // 25
+	"https://api.baichuan-ai.com",               // 26
+	"https://api.minimax.chat",                  // 27
+	"https://api.mistral.ai",                    // 28
+	"https://api.groq.com/openai",               // 29
+	"http://localhost:11434",                    // 30
+	"https://api.lingyiwanwu.com",               // 31
 }
+
+const (
+	ConfigKeyPrefix     = "cfg_"
+	ConfigKeyAPIVersion = ConfigKeyPrefix + "api_version"
+	ConfigKeyLibraryID  = ConfigKeyPrefix + "library_id"
+	ConfigKeyPlugin     = ConfigKeyPrefix + "plugin"
+)

View File

@@ -1,9 +1,12 @@
 package common

-import "one-api/common/helper"
+import (
+	"github.com/songquanpeng/one-api/common/env"
+)

 var UsingSQLite = false
 var UsingPostgreSQL = false
+var UsingMySQL = false

 var SQLitePath = "one-api.db"
-var SQLiteBusyTimeout = helper.GetOrDefaultEnvInt("SQLITE_BUSY_TIMEOUT", 3000)
+var SQLiteBusyTimeout = env.Int("SQLITE_BUSY_TIMEOUT", 3000)

View File

@@ -15,10 +15,7 @@ type embedFileSystem struct {

 func (e embedFileSystem) Exists(prefix string, path string) bool {
 	_, err := e.Open(path)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 func EmbedFolder(fsEmbed embed.FS, targetPath string) static.ServeFileSystem {

42
common/env/helper.go vendored Normal file
View File

@@ -0,0 +1,42 @@
+package env
+
+import (
+	"os"
+	"strconv"
+)
+
+func Bool(env string, defaultValue bool) bool {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	return os.Getenv(env) == "true"
+}
+
+func Int(env string, defaultValue int) int {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	num, err := strconv.Atoi(os.Getenv(env))
+	if err != nil {
+		return defaultValue
+	}
+	return num
+}
+
+func Float64(env string, defaultValue float64) float64 {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	num, err := strconv.ParseFloat(os.Getenv(env), 64)
+	if err != nil {
+		return defaultValue
+	}
+	return num
+}
+
+func String(env string, defaultValue string) string {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	return os.Getenv(env)
+}
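
These helpers replace GetOrDefaultEnvInt/GetOrDefaultEnvString (removed from common/utils.go later in this compare) and add Bool and Float64 variants. Usage is uniform: pass the variable name and a default, and any unset or unparseable value falls back. A short sketch:

package main

import (
	"fmt"
	"os"

	"github.com/songquanpeng/one-api/common/env"
)

func main() {
	// Unset variables fall back to the default.
	fmt.Println(env.Int("SYNC_FREQUENCY", 10*60)) // 600

	// Set variables are parsed; unparseable values also fall back.
	os.Setenv("METRIC_SUCCESS_RATE_THRESHOLD", "0.9")
	fmt.Println(env.Float64("METRIC_SUCCESS_RATE_THRESHOLD", 0.8)) // 0.9

	os.Setenv("ENABLE_METRIC", "not-a-bool")
	fmt.Println(env.Bool("ENABLE_METRIC", false)) // false: anything other than "true" is false
}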

View File

@@ -8,12 +8,24 @@ import (
 	"strings"
 )

-func UnmarshalBodyReusable(c *gin.Context, v any) error {
+const KeyRequestBody = "key_request_body"
+
+func GetRequestBody(c *gin.Context) ([]byte, error) {
+	requestBody, _ := c.Get(KeyRequestBody)
+	if requestBody != nil {
+		return requestBody.([]byte), nil
+	}
 	requestBody, err := io.ReadAll(c.Request.Body)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	err = c.Request.Body.Close()
+	_ = c.Request.Body.Close()
+	c.Set(KeyRequestBody, requestBody)
+	return requestBody.([]byte), nil
+}
+
+func UnmarshalBodyReusable(c *gin.Context, v any) error {
+	requestBody, err := GetRequestBody(c)
 	if err != nil {
 		return err
 	}
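
The point of this change: c.Request.Body is a one-shot stream, but the relay code may need the body more than once (for example to sniff the model name and then to forward the raw request). The first call drains the stream and caches the bytes on the gin context under KeyRequestBody; every later call is served from that cache. A hypothetical handler demonstrating the double read (assuming these helpers live in the common package, as they did before this refactor):

package main

import (
	"fmt"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
)

func handler(c *gin.Context) {
	var probe struct {
		Model string `json:"model"`
	}
	_ = common.UnmarshalBodyReusable(c, &probe) // first read drains and caches the body
	raw, _ := common.GetRequestBody(c)          // second read is served from the cache
	fmt.Printf("model=%s, %d body bytes\n", probe.Model, len(raw))
}

func main() {
	r := gin.Default()
	r.POST("/v1/chat/completions", handler)
	_ = r.Run(":3000")
}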

View File

@@ -2,7 +2,7 @@ package common

 import (
 	"encoding/json"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/logger"
 )

 var GroupRatio = map[string]float64{

View File

@@ -7,8 +7,6 @@ import (
 	"log"
 	"math/rand"
 	"net"
-	"one-api/common/logger"
-	"os"
 	"os/exec"
 	"runtime"
 	"strconv"
@@ -107,13 +105,13 @@ func Seconds2Time(num int) (time string) {
 }

 func Interface2String(inter interface{}) string {
-	switch inter.(type) {
+	switch inter := inter.(type) {
 	case string:
-		return inter.(string)
+		return inter
 	case int:
-		return fmt.Sprintf("%d", inter.(int))
+		return fmt.Sprintf("%d", inter)
 	case float64:
-		return fmt.Sprintf("%f", inter.(float64))
+		return fmt.Sprintf("%f", inter)
 	}
 	return "Not Implemented"
 }
@@ -137,6 +135,7 @@ func GetUUID() string {
 }

 const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const keyNumbers = "0123456789"

 func init() {
 	rand.Seed(time.Now().UnixNano())
@@ -168,6 +167,15 @@ func GetRandomString(length int) string {
 	return string(key)
 }

+func GetRandomNumberString(length int) string {
+	rand.Seed(time.Now().UnixNano())
+	key := make([]byte, length)
+	for i := 0; i < length; i++ {
+		key[i] = keyNumbers[rand.Intn(len(keyNumbers))]
+	}
+	return string(key)
+}
+
 func GetTimestamp() int64 {
 	return time.Now().Unix()
 }
@@ -177,6 +185,10 @@ func GetTimeString() string {
 	return fmt.Sprintf("%s%d", now.Format("20060102150405"), now.UnixNano()%1e9)
 }

+func GenRequestID() string {
+	return GetTimeString() + GetRandomNumberString(8)
+}
+
 func Max(a int, b int) int {
 	if a >= b {
 		return a
@@ -185,25 +197,6 @@ func Max(a int, b int) int {
 	}
 }

-func GetOrDefaultEnvInt(env string, defaultValue int) int {
-	if env == "" || os.Getenv(env) == "" {
-		return defaultValue
-	}
-	num, err := strconv.Atoi(os.Getenv(env))
-	if err != nil {
-		logger.SysError(fmt.Sprintf("failed to parse %s: %s, using default value: %d", env, err.Error(), defaultValue))
-		return defaultValue
-	}
-	return num
-}
-
-func GetOrDefaultEnvString(env string, defaultValue string) string {
-	if env == "" || os.Getenv(env) == "" {
-		return defaultValue
-	}
-	return os.Getenv(env)
-}
-
 func AssignOrDefault(value string, defaultValue string) string {
 	if len(value) != 0 {
 		return value

View File

@@ -12,7 +12,7 @@ import (
 	"strings"
 	"testing"

-	img "one-api/common/image"
+	img "github.com/songquanpeng/one-api/common/image"

 	"github.com/stretchr/testify/assert"
 	_ "golang.org/x/image/webp"

View File

@@ -3,9 +3,9 @@ package common
 import (
 	"flag"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"log"
-	"one-api/common/config"
-	"one-api/common/logger"
 	"os"
 	"path/filepath"
 )

View File

@@ -4,6 +4,8 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"io" "io"
"log" "log"
"os" "os"
@@ -13,14 +15,12 @@ import (
) )
const ( const (
loggerDEBUG = "DEBUG"
loggerINFO = "INFO" loggerINFO = "INFO"
loggerWarn = "WARN" loggerWarn = "WARN"
loggerError = "ERR" loggerError = "ERR"
) )
const maxLogCount = 1000000
var logCount int
var setupLogLock sync.Mutex var setupLogLock sync.Mutex
var setupLogWorking bool var setupLogWorking bool
@@ -55,6 +55,12 @@ func SysError(s string) {
_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s) _, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
} }
func Debug(ctx context.Context, msg string) {
if config.DebugEnabled {
logHelper(ctx, loggerDEBUG, msg)
}
}
func Info(ctx context.Context, msg string) { func Info(ctx context.Context, msg string) {
logHelper(ctx, loggerINFO, msg) logHelper(ctx, loggerINFO, msg)
} }
@@ -67,16 +73,20 @@ func Error(ctx context.Context, msg string) {
logHelper(ctx, loggerError, msg) logHelper(ctx, loggerError, msg)
} }
func Debugf(ctx context.Context, format string, a ...any) {
Debug(ctx, fmt.Sprintf(format, a...))
}
func Infof(ctx context.Context, format string, a ...any) { func Infof(ctx context.Context, format string, a ...any) {
Info(ctx, fmt.Sprintf(format, a)) Info(ctx, fmt.Sprintf(format, a...))
} }
func Warnf(ctx context.Context, format string, a ...any) { func Warnf(ctx context.Context, format string, a ...any) {
Warn(ctx, fmt.Sprintf(format, a)) Warn(ctx, fmt.Sprintf(format, a...))
} }
func Errorf(ctx context.Context, format string, a ...any) { func Errorf(ctx context.Context, format string, a ...any) {
Error(ctx, fmt.Sprintf(format, a)) Error(ctx, fmt.Sprintf(format, a...))
} }
func logHelper(ctx context.Context, level string, msg string) { func logHelper(ctx context.Context, level string, msg string) {
@@ -85,11 +95,12 @@ func logHelper(ctx context.Context, level string, msg string) {
writer = gin.DefaultWriter writer = gin.DefaultWriter
} }
id := ctx.Value(RequestIdKey) id := ctx.Value(RequestIdKey)
if id == nil {
id = helper.GenRequestID()
}
now := time.Now() now := time.Now()
_, _ = fmt.Fprintf(writer, "[%s] %v | %s | %s \n", level, now.Format("2006/01/02 - 15:04:05"), id, msg) _, _ = fmt.Fprintf(writer, "[%s] %v | %s | %s \n", level, now.Format("2006/01/02 - 15:04:05"), id, msg)
logCount++ // we don't need accurate count, so no lock here if !setupLogWorking {
if logCount > maxLogCount && !setupLogWorking {
logCount = 0
setupLogWorking = true setupLogWorking = true
go func() { go func() {
SetupLogger() SetupLogger()

View File

@@ -1,17 +1,20 @@
-package common
+package message

 import (
 	"crypto/rand"
 	"crypto/tls"
 	"encoding/base64"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
 	"net/smtp"
-	"one-api/common/config"
 	"strings"
 	"time"
 )

 func SendEmail(subject string, receiver string, content string) error {
+	if receiver == "" {
+		return fmt.Errorf("receiver is empty")
+	}
 	if config.SMTPFrom == "" { // for compatibility
 		config.SMTPFrom = config.SMTPAccount
 	}

22
common/message/main.go Normal file
View File

@@ -0,0 +1,22 @@
+package message
+
+import (
+	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
+)
+
+const (
+	ByAll           = "all"
+	ByEmail         = "email"
+	ByMessagePusher = "message_pusher"
+)
+
+func Notify(by string, title string, description string, content string) error {
+	if by == ByEmail {
+		return SendEmail(title, config.RootUserEmail, content)
+	}
+	if by == ByMessagePusher {
+		return SendMessage(title, description, content)
+	}
+	return fmt.Errorf("unknown notify method: %s", by)
+}
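
Notify is the single entry point behind "able to send alert message via message pusher (close #993)": callers pick a delivery method and the package routes to SendEmail or SendMessage. A usage sketch (hypothetical caller; assumes config.MessagePusherAddress and config.MessagePusherToken, or the SMTP settings, are configured):

package main

import (
	"log"

	"github.com/songquanpeng/one-api/common/message"
)

func main() {
	err := message.Notify(message.ByMessagePusher,
		"channel disabled",                  // title
		"channel #1 was auto-disabled",      // description (used by the message pusher)
		"success rate fell below threshold") // content
	if err != nil {
		// Unknown methods (anything besides email/message_pusher) also land here.
		log.Println("notify failed:", err)
	}
}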

View File

@@ -0,0 +1,53 @@
+package message
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"github.com/songquanpeng/one-api/common/config"
+	"net/http"
+)
+
+type request struct {
+	Title       string `json:"title"`
+	Description string `json:"description"`
+	Content     string `json:"content"`
+	URL         string `json:"url"`
+	Channel     string `json:"channel"`
+	Token       string `json:"token"`
+}
+
+type response struct {
+	Success bool   `json:"success"`
+	Message string `json:"message"`
+}
+
+func SendMessage(title string, description string, content string) error {
+	if config.MessagePusherAddress == "" {
+		return errors.New("message pusher address is not set")
+	}
+	req := request{
+		Title:       title,
+		Description: description,
+		Content:     content,
+		Token:       config.MessagePusherToken,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	resp, err := http.Post(config.MessagePusherAddress,
+		"application/json", bytes.NewBuffer(data))
+	if err != nil {
+		return err
+	}
+	var res response
+	err = json.NewDecoder(resp.Body).Decode(&res)
+	if err != nil {
+		return err
+	}
+	if !res.Success {
+		return errors.New(res.Message)
+	}
+	return nil
+}

View File

@@ -2,92 +2,92 @@ package common

 import (
 	"encoding/json"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/logger"
 	"strings"
 	"time"
 )

-var DalleSizeRatios = map[string]map[string]float64{
-	"dall-e-2": {
-		"256x256":   1,
-		"512x512":   1.125,
-		"1024x1024": 1.25,
-	},
-	"dall-e-3": {
-		"1024x1024": 1,
-		"1024x1792": 2,
-		"1792x1024": 2,
-	},
-}
-
-var DalleGenerationImageAmounts = map[string][2]int{
-	"dall-e-2": {1, 10},
-	"dall-e-3": {1, 1}, // OpenAI allows n=1 currently.
-}
-
-var DalleImagePromptLengthLimitations = map[string]int{
-	"dall-e-2": 1000,
-	"dall-e-3": 4000,
-}
+const (
+	USD2RMB = 7
+	USD     = 500 // $0.002 = 1 -> $1 = 500
+	RMB     = USD / USD2RMB
+)

 // ModelRatio
 // https://platform.openai.com/docs/models/model-endpoint-compatibility
 // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
 // https://openai.com/pricing
+// TODO: when a new api is enabled, check the pricing here
 // 1 === $0.002 / 1K tokens
 // 1 === ¥0.014 / 1k tokens
 var ModelRatio = map[string]float64{
+	// https://openai.com/pricing
 	"gpt-4":                   15,
 	"gpt-4-0314":              15,
 	"gpt-4-0613":              15,
 	"gpt-4-32k":               30,
 	"gpt-4-32k-0314":          30,
 	"gpt-4-32k-0613":          30,
 	"gpt-4-1106-preview":      5, // $0.01 / 1K tokens
+	"gpt-4-0125-preview":      5, // $0.01 / 1K tokens
+	"gpt-4-turbo-preview":     5, // $0.01 / 1K tokens
 	"gpt-4-vision-preview":    5,    // $0.01 / 1K tokens
 	"gpt-3.5-turbo":           0.75, // $0.0015 / 1K tokens
 	"gpt-3.5-turbo-0301":      0.75,
 	"gpt-3.5-turbo-0613":      0.75,
 	"gpt-3.5-turbo-16k":       1.5, // $0.003 / 1K tokens
 	"gpt-3.5-turbo-16k-0613":  1.5,
 	"gpt-3.5-turbo-instruct":  0.75, // $0.0015 / 1K tokens
 	"gpt-3.5-turbo-1106":      0.5,  // $0.001 / 1K tokens
+	"gpt-3.5-turbo-0125":      0.25, // $0.0005 / 1K tokens
 	"davinci-002":             1,    // $0.002 / 1K tokens
 	"babbage-002":             0.2,  // $0.0004 / 1K tokens
 	"text-ada-001":            0.2,
 	"text-babbage-001":        0.25,
 	"text-curie-001":          1,
 	"text-davinci-002":        10,
 	"text-davinci-003":        10,
 	"text-davinci-edit-001":   10,
 	"code-davinci-edit-001":   10,
 	"whisper-1":               15,  // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
 	"tts-1":                   7.5, // $0.015 / 1K characters
 	"tts-1-1106":              7.5,
 	"tts-1-hd":                15, // $0.030 / 1K characters
 	"tts-1-hd-1106":           15,
 	"davinci":                 10,
 	"curie":                   10,
 	"babbage":                 10,
 	"ada":                     10,
 	"text-embedding-ada-002":  0.05,
+	"text-embedding-3-small":  0.01,
+	"text-embedding-3-large":  0.065,
 	"text-search-ada-doc-001": 10,
 	"text-moderation-stable":  0.1,
 	"text-moderation-latest":  0.1,
 	"dall-e-2":                8,  // $0.016 - $0.020 / image
 	"dall-e-3":                20, // $0.040 - $0.120 / image
-	"claude-instant-1":        0.815,  // $1.63 / 1M tokens
-	"claude-2":                5.51,   // $11.02 / 1M tokens
-	"claude-2.0":              5.51,   // $11.02 / 1M tokens
-	"claude-2.1":              5.51,   // $11.02 / 1M tokens
-	"ERNIE-Bot":               0.8572, // 0.012 / 1k tokens
-	"ERNIE-Bot-turbo":         0.5715, // 0.008 / 1k tokens
-	"ERNIE-Bot-4":             8.572,  // ¥0.12 / 1k tokens
-	"Embedding-V1":            0.1429, // ¥0.002 / 1k tokens
+	// https://www.anthropic.com/api#pricing
+	"claude-instant-1.2":       0.8 / 1000 * USD,
+	"claude-2.0":               8.0 / 1000 * USD,
+	"claude-2.1":               8.0 / 1000 * USD,
+	"claude-3-haiku-20240307":  0.25 / 1000 * USD,
+	"claude-3-sonnet-20240229": 3.0 / 1000 * USD,
+	"claude-3-opus-20240229":   15.0 / 1000 * USD,
+	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
+	"ERNIE-Bot":       0.8572,     // ¥0.012 / 1k tokens
+	"ERNIE-Bot-turbo": 0.5715,     // ¥0.008 / 1k tokens
+	"ERNIE-Bot-4":     0.12 * RMB, // ¥0.12 / 1k tokens
+	"ERNIE-Bot-8k":    0.024 * RMB,
+	"Embedding-V1":    0.1429, // ¥0.002 / 1k tokens
+	"bge-large-zh":    0.002 * RMB,
+	"bge-large-en":    0.002 * RMB,
+	"bge-large-8k":    0.002 * RMB,
 	"PaLM-2":                  1,
 	"gemini-pro":              1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"gemini-pro-vision":       1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
+	// https://open.bigmodel.cn/pricing
+	"glm-4":       0.1 * RMB,
+	"glm-4v":      0.1 * RMB,
+	"glm-3-turbo": 0.005 * RMB,
 	"chatglm_turbo":           0.3572, // ¥0.005 / 1k tokens
 	"chatglm_pro":             0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":             0.3572, // ¥0.005 / 1k tokens
@@ -98,11 +98,81 @@ var ModelRatio = map[string]float64{
 	"qwen-max-longcontext":      1.4286, // ¥0.02 / 1k tokens
 	"text-embedding-v1":         0.05,   // ¥0.0007 / 1k tokens
 	"SparkDesk":                 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v1.1":            1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v2.1":            1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.1":            1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.5":            1.2858, // ¥0.018 / 1k tokens
 	"360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens
 	"embedding-bert-512-v1":     0.0715, // ¥0.001 / 1k tokens
 	"embedding_s1_v1":           0.0715, // ¥0.001 / 1k tokens
 	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
 	"hunyuan":                   7.143,  // ¥0.1 / 1k tokens  // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
+	"ChatStd":                   0.01 * RMB,
+	"ChatPro":                   0.1 * RMB,
+	// https://platform.moonshot.cn/pricing
+	"moonshot-v1-8k":   0.012 * RMB,
+	"moonshot-v1-32k":  0.024 * RMB,
+	"moonshot-v1-128k": 0.06 * RMB,
+	// https://platform.baichuan-ai.com/price
+	"Baichuan2-Turbo":      0.008 * RMB,
+	"Baichuan2-Turbo-192k": 0.016 * RMB,
+	"Baichuan2-53B":        0.02 * RMB,
+	// https://api.minimax.chat/document/price
+	"abab6-chat":    0.1 * RMB,
+	"abab5.5-chat":  0.015 * RMB,
+	"abab5.5s-chat": 0.005 * RMB,
+	// https://docs.mistral.ai/platform/pricing/
+	"open-mistral-7b":       0.25 / 1000 * USD,
+	"open-mixtral-8x7b":     0.7 / 1000 * USD,
+	"mistral-small-latest":  2.0 / 1000 * USD,
+	"mistral-medium-latest": 2.7 / 1000 * USD,
+	"mistral-large-latest":  8.0 / 1000 * USD,
+	"mistral-embed":         0.1 / 1000 * USD,
+	// https://wow.groq.com/
+	"llama2-70b-4096":    0.7 / 1000 * USD,
+	"llama2-7b-2048":     0.1 / 1000 * USD,
+	"mixtral-8x7b-32768": 0.27 / 1000 * USD,
+	"gemma-7b-it":        0.1 / 1000 * USD,
+	// https://platform.lingyiwanwu.com/docs#-计费单元
+	"yi-34b-chat-0205": 2.5 / 1000000 * RMB,
+	"yi-34b-chat-200k": 12.0 / 1000000 * RMB,
+	"yi-vl-plus":       6.0 / 1000000 * RMB,
+}
+
+var CompletionRatio = map[string]float64{}
+
+var DefaultModelRatio map[string]float64
+var DefaultCompletionRatio map[string]float64
+
+func init() {
+	DefaultModelRatio = make(map[string]float64)
+	for k, v := range ModelRatio {
+		DefaultModelRatio[k] = v
+	}
+	DefaultCompletionRatio = make(map[string]float64)
+	for k, v := range CompletionRatio {
+		DefaultCompletionRatio[k] = v
+	}
+}
+
+func AddNewMissingRatio(oldRatio string) string {
+	newRatio := make(map[string]float64)
+	err := json.Unmarshal([]byte(oldRatio), &newRatio)
+	if err != nil {
+		logger.SysError("error unmarshalling old ratio: " + err.Error())
+		return oldRatio
+	}
+	for k, v := range DefaultModelRatio {
+		if _, ok := newRatio[k]; !ok {
+			newRatio[k] = v
+		}
+	}
+	jsonBytes, err := json.Marshal(newRatio)
+	if err != nil {
+		logger.SysError("error marshalling new ratio: " + err.Error())
+		return oldRatio
+	}
+	return string(jsonBytes)
 }

 func ModelRatio2JSONString() string {
@@ -123,6 +193,9 @@ func GetModelRatio(name string) float64 {
 		name = strings.TrimSuffix(name, "-internet")
 	}
 	ratio, ok := ModelRatio[name]
+	if !ok {
+		ratio, ok = DefaultModelRatio[name]
+	}
 	if !ok {
 		logger.SysError("model ratio not found: " + name)
 		return 30
@@ -130,8 +203,32 @@ func GetModelRatio(name string) float64 {
 	return ratio
 }

+func CompletionRatio2JSONString() string {
+	jsonBytes, err := json.Marshal(CompletionRatio)
+	if err != nil {
+		logger.SysError("error marshalling completion ratio: " + err.Error())
+	}
+	return string(jsonBytes)
+}
+
+func UpdateCompletionRatioByJSONString(jsonStr string) error {
+	CompletionRatio = make(map[string]float64)
+	return json.Unmarshal([]byte(jsonStr), &CompletionRatio)
+}
+
 func GetCompletionRatio(name string) float64 {
+	if ratio, ok := CompletionRatio[name]; ok {
+		return ratio
+	}
+	if ratio, ok := DefaultCompletionRatio[name]; ok {
+		return ratio
+	}
 	if strings.HasPrefix(name, "gpt-3.5") {
+		if strings.HasSuffix(name, "0125") {
+			// https://openai.com/blog/new-embedding-models-and-api-updates
+			// Updated GPT-3.5 Turbo model and lower pricing
+			return 3
+		}
 		if strings.HasSuffix(name, "1106") {
 			return 2
 		}
@@ -144,7 +241,7 @@ func GetCompletionRatio(name string) float64 {
 				return 2
 			}
 		}
-		return 1.333333
+		return 4.0 / 3.0
 	}
 	if strings.HasPrefix(name, "gpt-4") {
 		if strings.HasSuffix(name, "preview") {
@@ -152,11 +249,18 @@ func GetCompletionRatio(name string) float64 {
 		}
 		return 2
 	}
-	if strings.HasPrefix(name, "claude-instant-1") {
-		return 3.38
+	if strings.HasPrefix(name, "claude-3") {
+		return 5
 	}
-	if strings.HasPrefix(name, "claude-2") {
-		return 2.965517
+	if strings.HasPrefix(name, "claude-") {
+		return 3
+	}
+	if strings.HasPrefix(name, "mistral-") {
+		return 3
+	}
+	switch name {
+	case "llama2-70b-4096":
+		return 0.8 / 0.7
 	}
 	return 1
 }
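
The new USD/RMB constants make the ratio table self-documenting: one ratio unit equals $0.002 per 1K tokens, so a price quoted in dollars per 1K tokens converts by multiplying with USD = 500. A worked example for claude-3-opus-20240229, combining its ModelRatio entry above with the claude-3 branch of GetCompletionRatio:

package main

import "fmt"

const USD = 500 // 1 ratio unit == $0.002 / 1K tokens, so $1 / 1K tokens == 500 units

func main() {
	// Opus prompt price: $15 / 1M tokens = $0.015 / 1K tokens.
	promptRatio := 15.0 / 1000 * USD // 7.5 ratio units
	// GetCompletionRatio returns 5 for claude-3 models, so completion
	// tokens are billed at five times the prompt ratio.
	completionRatio := promptRatio * 5 // 37.5 ratio units
	fmt.Println(promptRatio, completionRatio)
}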

8
common/random.go Normal file
View File

@@ -0,0 +1,8 @@
+package common
+
+import "math/rand"
+
+// RandRange returns a random number between min and max (max is not included)
+func RandRange(min, max int) int {
+	return min + rand.Intn(max-min)
+}

View File

@@ -3,7 +3,7 @@ package common
 import (
 	"context"
 	"github.com/go-redis/redis/v8"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/logger"
 	"os"
 	"time"
 )

View File

@@ -2,10 +2,10 @@ package common

 import (
 	"fmt"
-	"one-api/common/config"
+	"github.com/songquanpeng/one-api/common/config"
 )

-func LogQuota(quota int) string {
+func LogQuota(quota int64) string {
 	if config.DisplayInCurrencyEnabled {
 		return fmt.Sprintf("%.6f 额度", float64(quota)/config.QuotaPerUnit)
 	} else {

View File

@@ -2,14 +2,14 @@ package controller

 import (
 	"github.com/gin-gonic/gin"
-	"one-api/common/config"
-	"one-api/model"
-	"one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/model"
+	relaymodel "github.com/songquanpeng/one-api/relay/model"
 )

 func GetSubscription(c *gin.Context) {
-	var remainQuota int
-	var usedQuota int
+	var remainQuota int64
+	var usedQuota int64
 	var err error
 	var token *model.Token
 	var expiredTime int64
@@ -22,13 +22,15 @@ func GetSubscription(c *gin.Context) {
 	} else {
 		userId := c.GetInt("id")
 		remainQuota, err = model.GetUserQuota(userId)
-		usedQuota, err = model.GetUserUsedQuota(userId)
+		if err != nil {
+			usedQuota, err = model.GetUserUsedQuota(userId)
+		}
 	}
 	if expiredTime <= 0 {
 		expiredTime = 0
 	}
 	if err != nil {
-		Error := openai.Error{
+		Error := relaymodel.Error{
 			Message: err.Error(),
 			Type:    "upstream_error",
 		}
@@ -58,7 +60,7 @@ func GetSubscription(c *gin.Context) {
 }

 func GetUsage(c *gin.Context) {
-	var quota int
+	var quota int64
 	var err error
 	var token *model.Token
 	if config.DisplayTokenStatEnabled {
@@ -70,7 +72,7 @@ func GetUsage(c *gin.Context) {
 		quota, err = model.GetUserUsedQuota(userId)
 	}
 	if err != nil {
-		Error := openai.Error{
+		Error := relaymodel.Error{
 			Message: err.Error(),
 			Type:    "one_api_error",
 		}

View File

@@ -4,13 +4,14 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/model"
+	"github.com/songquanpeng/one-api/monitor"
+	"github.com/songquanpeng/one-api/relay/util"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/logger"
-	"one-api/model"
-	"one-api/relay/util"
 	"strconv"
 	"time"
@@ -295,7 +296,7 @@ func UpdateChannelBalance(c *gin.Context) {
 }

 func updateAllChannelsBalance() error {
-	channels, err := model.GetAllChannels(0, 0, true)
+	channels, err := model.GetAllChannels(0, 0, "all")
 	if err != nil {
 		return err
 	}
@@ -313,7 +314,7 @@ func updateAllChannelsBalance() error {
 		} else {
 			// err is nil & balance <= 0 means quota is used up
 			if balance <= 0 {
-				disableChannel(channel.Id, channel.Name, "余额不足")
+				monitor.DisableChannel(channel.Id, channel.Name, "余额不足")
 			}
 		}
 		time.Sleep(config.RequestInterval)
@@ -322,15 +323,14 @@ func updateAllChannelsBalance() error {
 }

 func UpdateAllChannelsBalance(c *gin.Context) {
-	// TODO: make it async
-	err := updateAllChannelsBalance()
-	if err != nil {
-		c.JSON(http.StatusOK, gin.H{
-			"success": false,
-			"message": err.Error(),
-		})
-		return
-	}
+	//err := updateAllChannelsBalance()
+	//if err != nil {
+	//	c.JSON(http.StatusOK, gin.H{
+	//		"success": false,
+	//		"message": err.Error(),
+	//	})
+	//	return
+	//}
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",

View File

@@ -5,102 +5,36 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/common/message"
"github.com/songquanpeng/one-api/middleware"
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/monitor"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/helper"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io" "io"
"net/http" "net/http"
"one-api/common" "net/http/httptest"
"one-api/common/config" "net/url"
"one-api/common/logger"
"one-api/model"
"one-api/relay/channel/openai"
"one-api/relay/util"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
func testChannel(channel *model.Channel, request openai.ChatRequest) (err error, openaiErr *openai.Error) { func buildTestRequest() *relaymodel.GeneralOpenAIRequest {
switch channel.Type { testRequest := &relaymodel.GeneralOpenAIRequest{
case common.ChannelTypePaLM: MaxTokens: 2,
fallthrough Stream: false,
case common.ChannelTypeGemini: Model: "gpt-3.5-turbo",
fallthrough
case common.ChannelTypeAnthropic:
fallthrough
case common.ChannelTypeBaidu:
fallthrough
case common.ChannelTypeZhipu:
fallthrough
case common.ChannelTypeAli:
fallthrough
case common.ChannelType360:
fallthrough
case common.ChannelTypeXunfei:
return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil
case common.ChannelTypeAzure:
request.Model = "gpt-35-turbo"
defer func() {
if err != nil {
err = errors.New("请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!")
}
}()
default:
request.Model = "gpt-3.5-turbo"
} }
requestURL := common.ChannelBaseURLs[channel.Type] testMessage := relaymodel.Message{
if channel.Type == common.ChannelTypeAzure {
requestURL = util.GetFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
} else {
if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
requestURL = baseURL
}
requestURL = util.GetFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
}
jsonData, err := json.Marshal(request)
if err != nil {
return err, nil
}
req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(jsonData))
if err != nil {
return err, nil
}
if channel.Type == common.ChannelTypeAzure {
req.Header.Set("api-key", channel.Key)
} else {
req.Header.Set("Authorization", "Bearer "+channel.Key)
}
req.Header.Set("Content-Type", "application/json")
resp, err := util.HTTPClient.Do(req)
if err != nil {
return err, nil
}
defer resp.Body.Close()
var response openai.SlimTextResponse
body, err := io.ReadAll(resp.Body)
if err != nil {
return err, nil
}
err = json.Unmarshal(body, &response)
if err != nil {
return fmt.Errorf("Error: %s\nResp body: %s", err, body), nil
}
if response.Usage.CompletionTokens == 0 {
if response.Error.Message == "" {
response.Error.Message = "补全 tokens 非预期返回 0"
}
return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message)), &response.Error
}
return nil, nil
}
func buildTestRequest() *openai.ChatRequest {
testRequest := &openai.ChatRequest{
Model: "", // this will be set later
MaxTokens: 1,
}
testMessage := openai.Message{
Role: "user", Role: "user",
Content: "hi", Content: "hi",
} }
@@ -108,6 +42,72 @@ func buildTestRequest() *openai.ChatRequest {
return testRequest return testRequest
} }
func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = &http.Request{
Method: "POST",
URL: &url.URL{Path: "/v1/chat/completions"},
Body: nil,
Header: make(http.Header),
}
c.Request.Header.Set("Authorization", "Bearer "+channel.Key)
c.Request.Header.Set("Content-Type", "application/json")
c.Set("channel", channel.Type)
c.Set("base_url", channel.GetBaseURL())
middleware.SetupContextForSelectedChannel(c, channel, "")
meta := util.GetRelayMeta(c)
apiType := constant.ChannelType2APIType(channel.Type)
adaptor := helper.GetAdaptor(apiType)
if adaptor == nil {
return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
}
adaptor.Init(meta)
modelName := adaptor.GetModelList()[0]
if !strings.Contains(channel.Models, modelName) {
modelNames := strings.Split(channel.Models, ",")
if len(modelNames) > 0 {
modelName = modelNames[0]
}
}
request := buildTestRequest()
request.Model = modelName
meta.OriginModelName, meta.ActualModelName = modelName, modelName
convertedRequest, err := adaptor.ConvertRequest(c, constant.RelayModeChatCompletions, request)
if err != nil {
return err, nil
}
jsonData, err := json.Marshal(convertedRequest)
if err != nil {
return err, nil
}
requestBody := bytes.NewBuffer(jsonData)
c.Request.Body = io.NopCloser(requestBody)
resp, err := adaptor.DoRequest(c, meta, requestBody)
if err != nil {
return err, nil
}
if resp.StatusCode != http.StatusOK {
err := util.RelayErrorHandler(resp)
return fmt.Errorf("status code %d: %s", resp.StatusCode, err.Error.Message), &err.Error
}
usage, respErr := adaptor.DoResponse(c, resp, meta)
if respErr != nil {
return fmt.Errorf("%s", respErr.Error.Message), &respErr.Error
}
if usage == nil {
return errors.New("usage is nil"), nil
}
result := w.Result()
// print result.Body
respBody, err := io.ReadAll(result.Body)
if err != nil {
return err, nil
}
logger.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
return nil, nil
}
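Note: the rewritten testChannel no longer issues its own HTTP request; it drives the real relay adaptors through a synthetic gin context and records whatever they write in memory. A minimal, self-contained sketch of that technique (the handler body and context key here are illustrative, not taken from the diff):

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "net/url"

    "github.com/gin-gonic/gin"
)

func main() {
    w := httptest.NewRecorder() // captures whatever a handler writes
    c, _ := gin.CreateTestContext(w)
    c.Request = &http.Request{
        Method: "POST",
        URL:    &url.URL{Path: "/v1/chat/completions"},
        Header: make(http.Header),
    }
    c.Set("channel_id", 42) // values later read back via c.GetInt / c.GetString
    c.JSON(http.StatusOK, gin.H{"ok": true})
    fmt.Println(w.Code, w.Body.String()) // 200 {"ok":true}
}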
func TestChannel(c *gin.Context) {
id, err := strconv.Atoi(c.Param("id"))
if err != nil {
@@ -125,9 +125,8 @@ func TestChannel(c *gin.Context) {
})
return
}
testRequest := buildTestRequest()
tik := time.Now()
err, _ = testChannel(channel, *testRequest)
err, _ = testChannel(channel)
tok := time.Now()
milliseconds := tok.Sub(tik).Milliseconds()
go channel.UpdateResponseTime(milliseconds)
@@ -151,33 +150,7 @@ func TestChannel(c *gin.Context) {
var testAllChannelsLock sync.Mutex
var testAllChannelsRunning bool = false

func notifyRootUser(subject string, content string) {
if config.RootUserEmail == "" {
config.RootUserEmail = model.GetRootUserEmail()
}
err := common.SendEmail(subject, config.RootUserEmail, content)
if err != nil {
logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
}
}

// disable & notify
func disableChannel(channelId int, channelName string, reason string) {
model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
subject := fmt.Sprintf("通道「%s」#%d已被禁用", channelName, channelId)
content := fmt.Sprintf("通道「%s」#%d已被禁用,原因:%s", channelName, channelId, reason)
notifyRootUser(subject, content)
}

// enable & notify
func enableChannel(channelId int, channelName string) {
model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled)
subject := fmt.Sprintf("通道「%s」#%d已被启用", channelName, channelId)
content := fmt.Sprintf("通道「%s」#%d已被启用", channelName, channelId)
notifyRootUser(subject, content)
}

func testAllChannels(notify bool) error {
func testChannels(notify bool, scope string) error {
if config.RootUserEmail == "" {
config.RootUserEmail = model.GetRootUserEmail()
}
@@ -188,11 +161,10 @@ func testAllChannels(notify bool) error {
}
testAllChannelsRunning = true
testAllChannelsLock.Unlock()
channels, err := model.GetAllChannels(0, 0, true)
channels, err := model.GetAllChannels(0, 0, scope)
if err != nil {
return err
}
testRequest := buildTestRequest()
var disableThreshold = int64(config.ChannelDisableThreshold * 1000)
if disableThreshold == 0 {
disableThreshold = 10000000 // a impossible value
@@ -201,18 +173,22 @@ func testAllChannels(notify bool) error {
for _, channel := range channels {
isChannelEnabled := channel.Status == common.ChannelStatusEnabled
tik := time.Now()
err, openaiErr := testChannel(channel, *testRequest)
err, openaiErr := testChannel(channel)
tok := time.Now()
milliseconds := tok.Sub(tik).Milliseconds()
if isChannelEnabled && milliseconds > disableThreshold {
err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
disableChannel(channel.Id, channel.Name, err.Error())
if config.AutomaticDisableChannelEnabled {
monitor.DisableChannel(channel.Id, channel.Name, err.Error())
} else {
_ = message.Notify(message.ByAll, fmt.Sprintf("渠道 %s(%d)测试超时", channel.Name, channel.Id), "", err.Error())
}
}
if isChannelEnabled && util.ShouldDisableChannel(openaiErr, -1) {
disableChannel(channel.Id, channel.Name, err.Error())
monitor.DisableChannel(channel.Id, channel.Name, err.Error())
}
if !isChannelEnabled && util.ShouldEnableChannel(err, openaiErr) {
enableChannel(channel.Id, channel.Name)
monitor.EnableChannel(channel.Id, channel.Name)
}
channel.UpdateResponseTime(milliseconds)
time.Sleep(config.RequestInterval)
@@ -221,7 +197,7 @@ func testAllChannels(notify bool) error {
testAllChannelsRunning = false
testAllChannelsLock.Unlock()
if notify {
err := common.SendEmail("通道测试完成", config.RootUserEmail, "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
err := message.Notify(message.ByAll, "通道测试完成", "", "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
if err != nil {
logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
}
}
@@ -230,8 +206,12 @@ func testAllChannels(notify bool) error {
return nil
}
func TestAllChannels(c *gin.Context) {
func TestChannels(c *gin.Context) {
err := testAllChannels(true)
scope := c.Query("scope")
if scope == "" {
scope = "all"
}
err := testChannels(true, scope)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
@@ -250,7 +230,7 @@ func AutomaticallyTestChannels(frequency int) {
for {
time.Sleep(time.Duration(frequency) * time.Minute)
logger.SysLog("testing all channels")
_ = testAllChannels(false)
_ = testChannels(false, "all")
logger.SysLog("channel test finished")
}
}
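TestChannels (the renamed TestAllChannels) now takes a scope query parameter, matching the new GetAllChannels scopes: "all" tests everything, "disabled" only the auto- or manually-disabled channels, and the periodic tester passes "all" explicitly. Assuming the route stays mounted where the old endpoint lived (the exact path depends on router setup not shown in this diff), a client call is a plain GET, e.g. http://localhost:3000/api/channel/test?scope=disabled.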


@@ -2,10 +2,10 @@ package controller
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common/config"
"one-api/common/helper"
"one-api/model"
"strconv"
"strings"
)
@@ -15,7 +15,7 @@ func GetAllChannels(c *gin.Context) {
if p < 0 {
p = 0
}
channels, err := model.GetAllChannels(p*config.ItemsPerPage, config.ItemsPerPage, false)
channels, err := model.GetAllChannels(p*config.ItemsPerPage, config.ItemsPerPage, "limited")
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,


@@ -7,12 +7,12 @@ import (
"fmt" "fmt"
"github.com/gin-contrib/sessions" "github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/model"
"net/http" "net/http"
"one-api/common"
"one-api/common/config"
"one-api/common/helper"
"one-api/common/logger"
"one-api/model"
"strconv" "strconv"
"time" "time"
) )


@@ -2,13 +2,13 @@ package controller
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"net/http"
"one-api/common"
)

func GetGroups(c *gin.Context) {
groupNames := make([]string, 0)
for groupName, _ := range common.GroupRatio {
for groupName := range common.GroupRatio {
groupNames = append(groupNames, groupName)
}
c.JSON(http.StatusOK, gin.H{


@@ -2,9 +2,9 @@ package controller
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common/config"
"one-api/model"
"strconv"
)


@@ -3,10 +3,11 @@ package controller
import (
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/message"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common"
"one-api/common/config"
"one-api/model"
"strings"

"github.com/gin-gonic/gin"
@@ -110,7 +111,7 @@ func SendEmailVerification(c *gin.Context) {
content := fmt.Sprintf("<p>您好,你正在进行%s邮箱验证。</p>"+
"<p>您的验证码为: <strong>%s</strong></p>"+
"<p>验证码 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, code, common.VerificationValidMinutes)
err := common.SendEmail(subject, email, content)
err := message.SendEmail(subject, email, content)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
@@ -149,7 +150,7 @@ func SendPasswordResetEmail(c *gin.Context) {
"<p>点击 <a href='%s'>此处</a> 进行密码重置。</p>"+ "<p>点击 <a href='%s'>此处</a> 进行密码重置。</p>"+
"<p>如果链接无法点击,请尝试点击下面的链接或将其复制到浏览器中打开:<br> %s </p>"+ "<p>如果链接无法点击,请尝试点击下面的链接或将其复制到浏览器中打开:<br> %s </p>"+
"<p>重置链接 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, link, link, common.VerificationValidMinutes) "<p>重置链接 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, link, link, common.VerificationValidMinutes)
err := common.SendEmail(subject, email, content) err := message.SendEmail(subject, email, content)
if err != nil { if err != nil {
c.JSON(http.StatusOK, gin.H{ c.JSON(http.StatusOK, gin.H{
"success": false, "success": false,


@@ -3,7 +3,13 @@ package controller
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/helper"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"net/http"
"one-api/relay/channel/openai"
)

// https://platform.openai.com/docs/api-reference/models/list
@@ -35,6 +41,7 @@ type OpenAIModels struct {
var openAIModels []OpenAIModels
var openAIModelsMap map[string]OpenAIModels
var channelId2Models map[int][]string

func init() {
var permission []OpenAIModelPermission
@@ -53,552 +60,63 @@ func init() {
IsBlocking: false,
})
// https://platform.openai.com/docs/models/model-endpoint-compatibility
for i := 0; i < constant.APITypeDummy; i++ {
if i == constant.APITypeAIProxyLibrary {
continue
}
adaptor := helper.GetAdaptor(i)
channelName := adaptor.GetChannelName()
modelNames := adaptor.GetModelList()
for _, modelName := range modelNames {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: channelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
}
for _, channelType := range openai.CompatibleChannels {
if channelType == common.ChannelTypeAzure {
continue
}
channelName, channelModelList := openai.GetCompatibleChannelMeta(channelType)
for _, modelName := range channelModelList {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: channelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
}
openAIModels = []OpenAIModels{
{
Id: "dall-e-2",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-2",
Parent: nil,
},
{
Id: "dall-e-3",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-3",
Parent: nil,
},
{
Id: "whisper-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "whisper-1",
Parent: nil,
},
{
Id: "tts-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1",
Parent: nil,
},
{
Id: "tts-1-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-1106",
Parent: nil,
},
{
Id: "tts-1-hd",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd",
Parent: nil,
},
{
Id: "tts-1-hd-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0301",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0301",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-1106",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-instruct",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-instruct",
Parent: nil,
},
{
Id: "gpt-4",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4",
Parent: nil,
},
{
Id: "gpt-4-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0314",
Parent: nil,
},
{
Id: "gpt-4-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0613",
Parent: nil,
},
{
Id: "gpt-4-32k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k",
Parent: nil,
},
{
Id: "gpt-4-32k-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0314",
Parent: nil,
},
{
Id: "gpt-4-32k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0613",
Parent: nil,
},
{
Id: "gpt-4-1106-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-1106-preview",
Parent: nil,
},
{
Id: "gpt-4-vision-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-vision-preview",
Parent: nil,
},
{
Id: "text-embedding-ada-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-embedding-ada-002",
Parent: nil,
},
{
Id: "text-davinci-003",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-003",
Parent: nil,
},
{
Id: "text-davinci-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-002",
Parent: nil,
},
{
Id: "text-curie-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-curie-001",
Parent: nil,
},
{
Id: "text-babbage-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-babbage-001",
Parent: nil,
},
{
Id: "text-ada-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-ada-001",
Parent: nil,
},
{
Id: "text-moderation-latest",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-latest",
Parent: nil,
},
{
Id: "text-moderation-stable",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-stable",
Parent: nil,
},
{
Id: "text-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-edit-001",
Parent: nil,
},
{
Id: "code-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "code-davinci-edit-001",
Parent: nil,
},
{
Id: "davinci-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "davinci-002",
Parent: nil,
},
{
Id: "babbage-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "babbage-002",
Parent: nil,
},
{
Id: "claude-instant-1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-instant-1",
Parent: nil,
},
{
Id: "claude-2",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2",
Parent: nil,
},
{
Id: "claude-2.1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.1",
Parent: nil,
},
{
Id: "claude-2.0",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.0",
Parent: nil,
},
{
Id: "ERNIE-Bot",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot",
Parent: nil,
},
{
Id: "ERNIE-Bot-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-turbo",
Parent: nil,
},
{
Id: "ERNIE-Bot-4",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-4",
Parent: nil,
},
{
Id: "Embedding-V1",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "Embedding-V1",
Parent: nil,
},
{
Id: "PaLM-2",
Object: "model",
Created: 1677649963,
OwnedBy: "google palm",
Permission: permission,
Root: "PaLM-2",
Parent: nil,
},
{
Id: "gemini-pro",
Object: "model",
Created: 1677649963,
OwnedBy: "google gemini",
Permission: permission,
Root: "gemini-pro",
Parent: nil,
},
{
Id: "gemini-pro-vision",
Object: "model",
Created: 1677649963,
OwnedBy: "google gemini",
Permission: permission,
Root: "gemini-pro-vision",
Parent: nil,
},
{
Id: "chatglm_turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_turbo",
Parent: nil,
},
{
Id: "chatglm_pro",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_pro",
Parent: nil,
},
{
Id: "chatglm_std",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_std",
Parent: nil,
},
{
Id: "chatglm_lite",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_lite",
Parent: nil,
},
{
Id: "qwen-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-turbo",
Parent: nil,
},
{
Id: "qwen-plus",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-plus",
Parent: nil,
},
{
Id: "qwen-max",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max",
Parent: nil,
},
{
Id: "qwen-max-longcontext",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max-longcontext",
Parent: nil,
},
{
Id: "text-embedding-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "text-embedding-v1",
Parent: nil,
},
{
Id: "SparkDesk",
Object: "model",
Created: 1677649963,
OwnedBy: "xunfei",
Permission: permission,
Root: "SparkDesk",
Parent: nil,
},
{
Id: "360GPT_S2_V9",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "360GPT_S2_V9",
Parent: nil,
},
{
Id: "embedding-bert-512-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding-bert-512-v1",
Parent: nil,
},
{
Id: "embedding_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding_s1_v1",
Parent: nil,
},
{
Id: "semantic_similarity_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "semantic_similarity_s1_v1",
Parent: nil,
},
{
Id: "hunyuan",
Object: "model",
Created: 1677649963,
OwnedBy: "tencent",
Permission: permission,
Root: "hunyuan",
Parent: nil,
},
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {
openAIModelsMap[model.Id] = model
}
channelId2Models = make(map[int][]string)
for i := 1; i < common.ChannelTypeDummy; i++ {
adaptor := helper.GetAdaptor(constant.ChannelType2APIType(i))
meta := &util.RelayMeta{
ChannelType: i,
}
adaptor.Init(meta)
channelId2Models[i] = adaptor.GetModelList()
}
}
func DashboardListModels(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"success": true,
"message": "",
"data": channelId2Models,
})
}

func ListModels(c *gin.Context) {
@@ -613,7 +131,7 @@ func RetrieveModel(c *gin.Context) {
if model, ok := openAIModelsMap[modelId]; ok {
c.JSON(200, model)
} else {
Error := openai.Error{
Error := relaymodel.Error{
Message: fmt.Sprintf("The model '%s' does not exist", modelId),
Type: "invalid_request_error",
Param: "model",


@@ -2,10 +2,10 @@ package controller
import (
"encoding/json"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common/config"
"one-api/common/helper"
"one-api/model"
"strings"

"github.com/gin-gonic/gin"


@@ -2,10 +2,10 @@ package controller
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common/config"
"one-api/common/helper"
"one-api/model"
"strconv"
)


@@ -1,24 +1,29 @@
package controller

import (
"bytes"
"context"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/middleware"
dbmodel "github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/monitor"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/controller"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
"one-api/common/config"
"one-api/common/helper"
"one-api/common/logger"
"one-api/relay/channel/openai"
"one-api/relay/constant"
"one-api/relay/controller"
"one-api/relay/util"
"strconv"
)
// https://platform.openai.com/docs/api-reference/chat
func Relay(c *gin.Context) {
relayMode := constant.Path2RelayMode(c.Request.URL.Path)
var err *openai.ErrorWithStatusCode
switch relayMode {
case constant.RelayModeImagesGenerations:
err = controller.RelayImageHelper(c, relayMode)
@@ -29,39 +34,99 @@ func Relay(c *gin.Context) {
case constant.RelayModeAudioTranscription:
err = controller.RelayAudioHelper(c, relayMode)
default:
err = controller.RelayTextHelper(c, relayMode)
}
if err != nil {
requestId := c.GetString(logger.RequestIdKey)
retryTimesStr := c.Query("retry")
retryTimes, _ := strconv.Atoi(retryTimesStr)
if retryTimesStr == "" {
retryTimes = config.RetryTimes
}
if retryTimes > 0 {
c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
} else {
if err.StatusCode == http.StatusTooManyRequests {
err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
}
err.Error.Message = helper.MessageWithRequestId(err.Error.Message, requestId)
c.JSON(err.StatusCode, gin.H{
"error": err.Error,
})
}
channelId := c.GetInt("channel_id")
logger.Error(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
// https://platform.openai.com/docs/guides/error-codes/api-errors
if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
channelId := c.GetInt("channel_id")
channelName := c.GetString("channel_name")
disableChannel(channelId, channelName, err.Message)
}
}
}

func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
var err *model.ErrorWithStatusCode
switch relayMode {
case constant.RelayModeImagesGenerations:
err = controller.RelayImageHelper(c, relayMode)
case constant.RelayModeAudioTranscription:
err = controller.RelayAudioHelper(c, relayMode)
default:
err = controller.RelayTextHelper(c)
}
return err
}

func Relay(c *gin.Context) {
ctx := c.Request.Context()
relayMode := constant.Path2RelayMode(c.Request.URL.Path)
if config.DebugEnabled {
requestBody, _ := common.GetRequestBody(c)
logger.Debugf(ctx, "request body: %s", string(requestBody))
}
channelId := c.GetInt("channel_id")
bizErr := relay(c, relayMode)
if bizErr == nil {
monitor.Emit(channelId, true)
return
}
lastFailedChannelId := channelId
channelName := c.GetString("channel_name")
group := c.GetString("group")
originalModel := c.GetString("original_model")
go processChannelRelayError(ctx, channelId, channelName, bizErr)
requestId := c.GetString(logger.RequestIdKey)
retryTimes := config.RetryTimes
if !shouldRetry(c, bizErr.StatusCode) {
logger.Errorf(ctx, "relay error happen, status code is %d, won't retry in this case", bizErr.StatusCode)
retryTimes = 0
}
for i := retryTimes; i > 0; i-- {
channel, err := dbmodel.CacheGetRandomSatisfiedChannel(group, originalModel, i != retryTimes)
if err != nil {
logger.Errorf(ctx, "CacheGetRandomSatisfiedChannel failed: %w", err)
break
}
logger.Infof(ctx, "using channel #%d to retry (remain times %d)", channel.Id, i)
if channel.Id == lastFailedChannelId {
continue
}
middleware.SetupContextForSelectedChannel(c, channel, originalModel)
requestBody, err := common.GetRequestBody(c)
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
bizErr = relay(c, relayMode)
if bizErr == nil {
return
}
channelId := c.GetInt("channel_id")
lastFailedChannelId = channelId
channelName := c.GetString("channel_name")
go processChannelRelayError(ctx, channelId, channelName, bizErr)
}
if bizErr != nil {
if bizErr.StatusCode == http.StatusTooManyRequests {
bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
}
bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
c.JSON(bizErr.StatusCode, gin.H{
"error": bizErr.Error,
})
}
}
func shouldRetry(c *gin.Context, statusCode int) bool {
if _, ok := c.Get("specific_channel_id"); ok {
return false
}
if statusCode == http.StatusTooManyRequests {
return true
}
if statusCode/100 == 5 {
return true
}
if statusCode == http.StatusBadRequest {
return false
}
if statusCode/100 == 2 {
return false
}
return true
}
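shouldRetry encodes the retry policy in one place: never retry when the caller pinned a specific channel, always retry 429 and 5xx, never retry 400 or 2xx, and retry everything else (e.g. 401/403, which may be channel-specific). A standalone re-statement of the status-code branch, runnable as-is:

package main

import (
    "fmt"
    "net/http"
)

// retryableStatus mirrors the status-code logic of shouldRetry above.
func retryableStatus(code int) bool {
    if code == http.StatusTooManyRequests {
        return true
    }
    if code/100 == 5 {
        return true
    }
    if code == http.StatusBadRequest {
        return false
    }
    if code/100 == 2 {
        return false
    }
    return true
}

func main() {
    for _, code := range []int{200, 400, 401, 429, 500, 503} {
        fmt.Printf("%d -> %v\n", code, retryableStatus(code))
    }
    // 200 -> false, 400 -> false, 401 -> true, 429 -> true, 500 -> true, 503 -> true
}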
func processChannelRelayError(ctx context.Context, channelId int, channelName string, err *model.ErrorWithStatusCode) {
logger.Errorf(ctx, "relay error (channel #%d): %s", channelId, err.Message)
// https://platform.openai.com/docs/guides/error-codes/api-errors
if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
monitor.DisableChannel(channelId, channelName, err.Message)
} else {
monitor.Emit(channelId, false)
}
}
func RelayNotImplemented(c *gin.Context) {
err := openai.Error{
err := model.Error{
Message: "API not implemented",
Type: "one_api_error",
Param: "",
@@ -73,7 +138,7 @@ func RelayNotImplemented(c *gin.Context) {
}

func RelayNotFound(c *gin.Context) {
err := openai.Error{
err := model.Error{
Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
Type: "invalid_request_error",
Param: "",


@@ -2,11 +2,11 @@ package controller
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common"
"one-api/common/config"
"one-api/common/helper"
"one-api/model"
"strconv"
)


@@ -3,11 +3,11 @@ package controller
import (
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common"
"one-api/common/config"
"one-api/common/helper"
"one-api/model"
"strconv"
"time"


@@ -5,10 +5,10 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/model"
"net/http" "net/http"
"one-api/common"
"one-api/common/config"
"one-api/model"
"strconv" "strconv"
"time" "time"
) )


@@ -2,7 +2,7 @@ version: '3.4'
services:
one-api:
image: justsong/one-api:latest
image: "${REGISTRY:-docker.io}/justsong/one-api:latest"
container_name: one-api
restart: always
command: --log-dir /app/logs
@@ -29,12 +29,12 @@ services:
retries: 3
redis:
image: redis:latest
image: "${REGISTRY:-docker.io}/redis:latest"
container_name: redis
restart: always
db:
image: mysql:8.2.0
image: "${REGISTRY:-docker.io}/mysql:8.2.0"
restart: always
container_name: mysql
volumes:
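The ${REGISTRY:-docker.io} interpolation is standard Compose variable substitution with a default: when REGISTRY is unset the images still resolve to docker.io, while a deployment behind a private mirror can override the source without editing the file, e.g. REGISTRY=registry.example.com docker compose up -d (registry.example.com being a placeholder for your mirror).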

go.mod

@@ -1,4 +1,4 @@
module one-api
module github.com/songquanpeng/one-api

// +heroku goVersion go1.18

go 1.18
@@ -60,6 +60,6 @@ require (
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@@ -177,8 +177,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IV
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=


@@ -456,6 +456,7 @@
"已绑定的邮箱账户": "Email Account Bound", "已绑定的邮箱账户": "Email Account Bound",
"用户信息更新成功!": "User information updated successfully!", "用户信息更新成功!": "User information updated successfully!",
"模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f", "模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f",
"模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f": "model rate %.2f, group rate %.2f, completion rate %.2f",
"使用明细(总消耗额度:{renderQuota(stat.quota)}": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})", "使用明细(总消耗额度:{renderQuota(stat.quota)}": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
"用户名称": "User Name", "用户名称": "User Name",
"令牌名称": "Token Name", "令牌名称": "Token Name",

main.go

@@ -6,14 +6,14 @@ import (
"github.com/gin-contrib/sessions" "github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/cookie" "github.com/gin-contrib/sessions/cookie"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"one-api/common" "github.com/songquanpeng/one-api/common"
"one-api/common/config" "github.com/songquanpeng/one-api/common/config"
"one-api/common/logger" "github.com/songquanpeng/one-api/common/logger"
"one-api/controller" "github.com/songquanpeng/one-api/controller"
"one-api/middleware" "github.com/songquanpeng/one-api/middleware"
"one-api/model" "github.com/songquanpeng/one-api/model"
"one-api/relay/channel/openai" "github.com/songquanpeng/one-api/relay/channel/openai"
"one-api/router" "github.com/songquanpeng/one-api/router"
"os" "os"
"strconv" "strconv"
) )
@@ -30,11 +30,25 @@ func main() {
if config.DebugEnabled {
logger.SysLog("running in debug mode")
}
var err error
// Initialize SQL Database
err := model.InitDB()
model.DB, err = model.InitDB("SQL_DSN")
if err != nil {
logger.FatalLog("failed to initialize database: " + err.Error())
}
if os.Getenv("LOG_SQL_DSN") != "" {
logger.SysLog("using secondary database for table logs")
model.LOG_DB, err = model.InitDB("LOG_SQL_DSN")
if err != nil {
logger.FatalLog("failed to initialize secondary database: " + err.Error())
}
} else {
model.LOG_DB = model.DB
}
err = model.CreateRootAccountIfNeed()
if err != nil {
logger.FatalLog("database init error: " + err.Error())
}
defer func() {
err := model.CloseDB()
if err != nil {
@@ -64,13 +78,6 @@ func main() {
go model.SyncOptions(config.SyncFrequency)
go model.SyncChannelCache(config.SyncFrequency)
}
if os.Getenv("CHANNEL_UPDATE_FREQUENCY") != "" {
frequency, err := strconv.Atoi(os.Getenv("CHANNEL_UPDATE_FREQUENCY"))
if err != nil {
logger.FatalLog("failed to parse CHANNEL_UPDATE_FREQUENCY: " + err.Error())
}
go controller.AutomaticallyUpdateChannels(frequency)
}
if os.Getenv("CHANNEL_TEST_FREQUENCY") != "" { if os.Getenv("CHANNEL_TEST_FREQUENCY") != "" {
frequency, err := strconv.Atoi(os.Getenv("CHANNEL_TEST_FREQUENCY")) frequency, err := strconv.Atoi(os.Getenv("CHANNEL_TEST_FREQUENCY"))
if err != nil { if err != nil {
@@ -83,6 +90,9 @@ func main() {
logger.SysLog("batch update enabled with interval " + strconv.Itoa(config.BatchUpdateInterval) + "s") logger.SysLog("batch update enabled with interval " + strconv.Itoa(config.BatchUpdateInterval) + "s")
model.InitBatchUpdater() model.InitBatchUpdater()
} }
if config.EnableMetric {
logger.SysLog("metric enabled, will disable channel if too much request failed")
}
openai.InitTokenEncoders()

// Initialize HTTP server


@@ -3,9 +3,10 @@ package middleware
import (
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/blacklist"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common"
"one-api/model"
"strings"
)
@@ -42,11 +43,14 @@ func authHelper(c *gin.Context, minRole int) {
return
}
}
if status.(int) == common.UserStatusDisabled {
if status.(int) == common.UserStatusDisabled || blacklist.IsUserBanned(id.(int)) {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": "用户已被封禁",
})
session := sessions.Default(c)
session.Clear()
_ = session.Save()
c.Abort()
return
}
@@ -99,7 +103,7 @@ func TokenAuth() func(c *gin.Context) {
abortWithMessage(c, http.StatusInternalServerError, err.Error())
return
}
if !userEnabled {
if !userEnabled || blacklist.IsUserBanned(token.UserId) {
abortWithMessage(c, http.StatusForbidden, "用户已被封禁")
return
}
@@ -108,7 +112,7 @@ func TokenAuth() func(c *gin.Context) {
c.Set("token_name", token.Name) c.Set("token_name", token.Name)
if len(parts) > 1 { if len(parts) > 1 {
if model.IsAdmin(token.UserId) { if model.IsAdmin(token.UserId) {
c.Set("channelId", parts[1]) c.Set("specific_channel_id", parts[1])
} else { } else {
abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道") abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道")
return return


@@ -2,10 +2,10 @@ package middleware
import (
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/model"
"net/http"
"one-api/common"
"one-api/common/logger"
"one-api/model"
"strconv"
"strings"
@@ -21,8 +21,9 @@ func Distribute() func(c *gin.Context) {
userId := c.GetInt("id")
userGroup, _ := model.CacheGetUserGroup(userId)
c.Set("group", userGroup)
var requestModel string
var channel *model.Channel
channelId, ok := c.Get("channelId")
channelId, ok := c.Get("specific_channel_id")
if ok {
id, err := strconv.Atoi(channelId.(string))
if err != nil {
@@ -66,7 +67,8 @@ func Distribute() func(c *gin.Context) {
modelRequest.Model = "whisper-1" modelRequest.Model = "whisper-1"
} }
} }
channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model) requestModel = modelRequest.Model
channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model, false)
if err != nil { if err != nil {
message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model) message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)
if channel != nil { if channel != nil {
@@ -77,24 +79,34 @@ func Distribute() func(c *gin.Context) {
return
}
}
c.Set("channel", channel.Type)
c.Set("channel_id", channel.Id)
c.Set("channel_name", channel.Name)
c.Set("model_mapping", channel.GetModelMapping())
c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
c.Set("base_url", channel.GetBaseURL())
switch channel.Type {
case common.ChannelTypeAzure:
c.Set("api_version", channel.Other)
case common.ChannelTypeXunfei:
c.Set("api_version", channel.Other)
case common.ChannelTypeGemini:
c.Set("api_version", channel.Other)
case common.ChannelTypeAIProxyLibrary:
c.Set("library_id", channel.Other)
case common.ChannelTypeAli:
c.Set("plugin", channel.Other)
}
SetupContextForSelectedChannel(c, channel, requestModel)
c.Next()
}
}
func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
c.Set("channel", channel.Type)
c.Set("channel_id", channel.Id)
c.Set("channel_name", channel.Name)
c.Set("model_mapping", channel.GetModelMapping())
c.Set("original_model", modelName) // for retry
c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
c.Set("base_url", channel.GetBaseURL())
// this is for backward compatibility
switch channel.Type {
case common.ChannelTypeAzure:
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeXunfei:
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeGemini:
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeAIProxyLibrary:
c.Set(common.ConfigKeyLibraryID, channel.Other)
case common.ChannelTypeAli:
c.Set(common.ConfigKeyPlugin, channel.Other)
}
cfg, _ := channel.LoadConfig()
for k, v := range cfg {
c.Set(common.ConfigKeyPrefix+k, v)
}
}
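SetupContextForSelectedChannel now also unpacks the channel's Config JSON (see LoadConfig later in this diff) into the gin context under prefixed keys, while the legacy Other field is kept for backward compatibility. A rough sketch of that propagation, with the prefix value assumed since the literal of the ConfigKeyPrefix constant is not shown here:

package main

import (
    "encoding/json"
    "fmt"
)

const configKeyPrefix = "config_" // assumption: stands in for common.ConfigKeyPrefix

func main() {
    raw := `{"api_version":"2024-02-01","region":"eastus"}` // illustrative channel Config
    cfg := map[string]string{}
    if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
        panic(err)
    }
    ctx := map[string]any{} // stands in for the gin context's key/value store
    for k, v := range cfg {
        ctx[configKeyPrefix+k] = v
    }
    fmt.Println(ctx["config_api_version"]) // 2024-02-01
}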


@@ -3,7 +3,7 @@ package middleware
import (
"fmt"
"github.com/gin-gonic/gin"
"one-api/common/logger"
"github.com/songquanpeng/one-api/common/logger"
)

func SetUpLogger(server *gin.Engine) {


@@ -4,9 +4,9 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"net/http" "net/http"
"one-api/common"
"one-api/common/config"
"time" "time"
) )


@@ -3,8 +3,9 @@ package middleware
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/logger"
"net/http"
"one-api/common/logger"
"runtime/debug"
)
@@ -12,11 +13,15 @@ func RelayPanicRecover() gin.HandlerFunc {
return func(c *gin.Context) {
defer func() {
if err := recover(); err != nil {
logger.SysError(fmt.Sprintf("panic detected: %v", err))
logger.SysError(fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
ctx := c.Request.Context()
logger.Errorf(ctx, fmt.Sprintf("panic detected: %v", err))
logger.Errorf(ctx, fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
logger.Errorf(ctx, fmt.Sprintf("request: %s %s", c.Request.Method, c.Request.URL.Path))
body, _ := common.GetRequestBody(c)
logger.Errorf(ctx, fmt.Sprintf("request body: %s", string(body)))
c.JSON(http.StatusInternalServerError, gin.H{
"error": gin.H{
"message": fmt.Sprintf("Panic detected, error: %v. Please submit a issue here: https://github.com/songquanpeng/one-api", err),
"message": fmt.Sprintf("Panic detected, error: %v. Please submit an issue with the related log here: https://github.com/songquanpeng/one-api", err),
"type": "one_api_panic",
},
})


@@ -3,13 +3,13 @@ package middleware
import (
"context"
"github.com/gin-gonic/gin"
"one-api/common/helper"
"one-api/common/logger"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
)

func RequestId() func(c *gin.Context) {
return func(c *gin.Context) {
id := helper.GetTimeString() + helper.GetRandomString(8)
id := helper.GenRequestID()
c.Set(logger.RequestIdKey, id)
ctx := context.WithValue(c.Request.Context(), logger.RequestIdKey, id)
c.Request = c.Request.WithContext(ctx)


@@ -4,10 +4,10 @@ import (
"encoding/json" "encoding/json"
"github.com/gin-contrib/sessions" "github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/logger"
"net/http" "net/http"
"net/url" "net/url"
"one-api/common/config"
"one-api/common/logger"
) )
type turnstileCheckResponse struct { type turnstileCheckResponse struct {


@@ -2,8 +2,8 @@ package middleware
import (
"github.com/gin-gonic/gin"
"one-api/common/helper"
"one-api/common/logger"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
)

func abortWithMessage(c *gin.Context, statusCode int, message string) {


@@ -1,7 +1,7 @@
package model

import (
"one-api/common"
"github.com/songquanpeng/one-api/common"
"strings"
)


@@ -1,13 +1,14 @@
package model

import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/logger"
"math/rand"
"one-api/common"
"one-api/common/config"
"one-api/common/logger"
"sort"
"strconv"
"strings"
@@ -70,31 +71,42 @@ func CacheGetUserGroup(id int) (group string, err error) {
return group, err
}
func CacheGetUserQuota(id int) (quota int, err error) {
if !common.RedisEnabled {
return GetUserQuota(id)
}
quotaString, err := common.RedisGet(fmt.Sprintf("user_quota:%d", id))
if err != nil {
quota, err = GetUserQuota(id)
if err != nil {
return 0, err
}
err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
if err != nil {
logger.SysError("Redis set user quota error: " + err.Error())
}
return quota, err
}
quota, err = strconv.Atoi(quotaString)
return quota, err
}

func fetchAndUpdateUserQuota(ctx context.Context, id int) (quota int64, err error) {
quota, err = GetUserQuota(id)
if err != nil {
return 0, err
}
err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
if err != nil {
logger.Error(ctx, "Redis set user quota error: "+err.Error())
}
return
}

func CacheGetUserQuota(ctx context.Context, id int) (quota int64, err error) {
if !common.RedisEnabled {
return GetUserQuota(id)
}
quotaString, err := common.RedisGet(fmt.Sprintf("user_quota:%d", id))
if err != nil {
return fetchAndUpdateUserQuota(ctx, id)
}
quota, err = strconv.ParseInt(quotaString, 10, 64)
if err != nil {
return 0, nil
}
if quota <= config.PreConsumedQuota { // when user's quota is less than pre-consumed quota, we need to fetch from db
logger.Infof(ctx, "user %d's cached quota is too low: %d, refreshing from db", quota, id)
return fetchAndUpdateUserQuota(ctx, id)
}
return quota, nil
}
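The effect of the rewrite: a Redis hit is trusted only while the cached value stays above config.PreConsumedQuota; once it drops to or below that line, the quota is re-read from the database and the cache refreshed, so a stale low reading cannot lock out a user who still has quota. The guard itself, isolated from Redis (the threshold literal is illustrative):

package main

import "fmt"

const preConsumedQuota int64 = 500 // illustrative stand-in for config.PreConsumedQuota

// needsRefresh reports whether a cached quota reading should be re-read from the DB.
func needsRefresh(cached int64) bool {
    return cached <= preConsumedQuota
}

func main() {
    fmt.Println(needsRefresh(100))  // true: near the pre-consume line, go to the DB
    fmt.Println(needsRefresh(5000)) // false: the cache is trustworthy
}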
func CacheUpdateUserQuota(id int) error {
func CacheUpdateUserQuota(ctx context.Context, id int) error {
if !common.RedisEnabled {
return nil
}
quota, err := GetUserQuota(id)
quota, err := CacheGetUserQuota(ctx, id)
if err != nil {
return err
}
@@ -102,7 +114,7 @@ func CacheUpdateUserQuota(id int) error {
return err
}

func CacheDecreaseUserQuota(id int, quota int) error {
func CacheDecreaseUserQuota(id int, quota int64) error {
if !common.RedisEnabled {
return nil
}
@@ -191,7 +203,7 @@ func SyncChannelCache(frequency int) {
}
}

func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
func CacheGetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority bool) (*Channel, error) {
if !config.MemoryCacheEnabled {
return GetRandomSatisfiedChannel(group, model)
}
@@ -213,5 +225,10 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
}
}
idx := rand.Intn(endIdx)
if ignoreFirstPriority {
if endIdx < len(channels) { // which means there are more than one priority
idx = common.RandRange(endIdx, len(channels))
}
}
return channels[idx], nil
}
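Channels are sorted so that indexes below endIdx hold the first-priority group; with ignoreFirstPriority set (the retry path), the pick is deliberately made from the lower-priority remainder. common.RandRange itself is not shown in this diff; the sketch below assumes it returns a uniform integer in [min, max):

package main

import (
    "fmt"
    "math/rand"
)

// randRange mimics the assumed contract of common.RandRange: uniform in [min, max).
func randRange(min, max int) int {
    return min + rand.Intn(max-min)
}

func main() {
    endIdx, total := 2, 5   // channels[0:2] first priority, channels[2:5] fallbacks
    idx := rand.Intn(endIdx) // normal path: pick among first priority
    if endIdx < total {      // more than one priority level exists
        idx = randRange(endIdx, total) // retry path: pick among fallbacks only
    }
    fmt.Println("chosen index:", idx)
}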


@@ -3,17 +3,17 @@ package model
import (
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"gorm.io/gorm"
"one-api/common"
"one-api/common/config"
"one-api/common/helper"
"one-api/common/logger"
)

type Channel struct {
Id int `json:"id"`
Type int `json:"type" gorm:"default:0"`
Key string `json:"key" gorm:"not null;index"`
Key string `json:"key" gorm:"type:text"`
Status int `json:"status" gorm:"default:1"`
Name string `json:"name" gorm:"index"`
Weight *uint `json:"weight" gorm:"default:0"`
@@ -21,7 +21,7 @@ type Channel struct {
TestTime int64 `json:"test_time" gorm:"bigint"`
ResponseTime int `json:"response_time"` // in milliseconds
BaseURL *string `json:"base_url" gorm:"column:base_url;default:''"`
Other string `json:"other"`
Other string `json:"other"` // DEPRECATED: please save config to field Config
Balance float64 `json:"balance"` // in USD
BalanceUpdatedTime int64 `json:"balance_updated_time" gorm:"bigint"`
Models string `json:"models"`
@@ -29,25 +29,25 @@ type Channel struct {
UsedQuota int64 `json:"used_quota" gorm:"bigint;default:0"`
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
Config string `json:"config"`
}

func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
func GetAllChannels(startIdx int, num int, scope string) ([]*Channel, error) {
var channels []*Channel
var err error
if selectAll {
err = DB.Order("id desc").Find(&channels).Error
} else {
err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("key").Find(&channels).Error
}
switch scope {
case "all":
err = DB.Order("id desc").Find(&channels).Error
case "disabled":
err = DB.Order("id desc").Where("status = ? or status = ?", common.ChannelStatusAutoDisabled, common.ChannelStatusManuallyDisabled).Find(&channels).Error
default:
err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("key").Find(&channels).Error
}
return channels, err
}

func SearchChannels(keyword string) (channels []*Channel, err error) {
keyCol := "`key`"
if common.UsingPostgreSQL {
keyCol = `"key"`
}
err = DB.Omit("key").Where("id = ? or name LIKE ? or "+keyCol+" = ?", helper.String2Int(keyword), keyword+"%", keyword).Find(&channels).Error
err = DB.Omit("key").Where("id = ? or name LIKE ?", helper.String2Int(keyword), keyword+"%").Find(&channels).Error
return channels, err
}
@@ -155,6 +155,18 @@ func (channel *Channel) Delete() error {
return err
}
func (channel *Channel) LoadConfig() (map[string]string, error) {
if channel.Config == "" {
return nil, nil
}
cfg := make(map[string]string)
err := json.Unmarshal([]byte(channel.Config), &cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
func UpdateChannelStatusById(id int, status int) {
err := UpdateAbilityStatus(id, status == common.ChannelStatusEnabled)
if err != nil {
@@ -166,7 +178,7 @@ func UpdateChannelStatusById(id int, status int) {
}
}

func UpdateChannelUsedQuota(id int, quota int) {
func UpdateChannelUsedQuota(id int, quota int64) {
if config.BatchUpdateEnabled {
addNewRecord(BatchUpdateTypeChannelUsedQuota, id, quota)
return
@@ -174,7 +186,7 @@ func UpdateChannelUsedQuota(id int, quota int) {
updateChannelUsedQuota(id, quota)
}

func updateChannelUsedQuota(id int, quota int) {
func updateChannelUsedQuota(id int, quota int64) {
err := DB.Model(&Channel{}).Where("id = ?", id).Update("used_quota", gorm.Expr("used_quota + ?", quota)).Error
if err != nil {
logger.SysError("failed to update channel used quota: " + err.Error())


@@ -3,10 +3,10 @@ package model
import (
"context"
"fmt"
"one-api/common"
"one-api/common/config"
"one-api/common/helper"
"one-api/common/logger"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"gorm.io/gorm"
)
@@ -45,13 +45,13 @@ func RecordLog(userId int, logType int, content string) {
Type: logType,
Content: content,
}
err := DB.Create(log).Error
err := LOG_DB.Create(log).Error
if err != nil {
logger.SysError("failed to record log: " + err.Error())
}
}

func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string) {
func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int64, content string) {
logger.Info(ctx, fmt.Sprintf("record consume log: userId=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
if !config.LogConsumeEnabled {
return
@@ -66,10 +66,10 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
CompletionTokens: completionTokens,
TokenName: tokenName,
ModelName: modelName,
Quota: quota,
Quota: int(quota),
ChannelId: channelId,
}
err := DB.Create(log).Error
err := LOG_DB.Create(log).Error
if err != nil {
logger.Error(ctx, "failed to record log: "+err.Error())
}
@@ -78,9 +78,9 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
func GetAllLogs(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, startIdx int, num int, channel int) (logs []*Log, err error) {
var tx *gorm.DB
if logType == LogTypeUnknown {
tx = DB
tx = LOG_DB
} else {
tx = DB.Where("type = ?", logType)
tx = LOG_DB.Where("type = ?", logType)
}
if modelName != "" {
tx = tx.Where("model_name = ?", modelName)
@@ -107,9 +107,9 @@ func GetAllLogs(logType int, startTimestamp int64, endTimestamp int64, modelName
func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int64, modelName string, tokenName string, startIdx int, num int) (logs []*Log, err error) { func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int64, modelName string, tokenName string, startIdx int, num int) (logs []*Log, err error) {
var tx *gorm.DB var tx *gorm.DB
if logType == LogTypeUnknown { if logType == LogTypeUnknown {
tx = DB.Where("user_id = ?", userId) tx = LOG_DB.Where("user_id = ?", userId)
} else { } else {
tx = DB.Where("user_id = ? and type = ?", userId, logType) tx = LOG_DB.Where("user_id = ? and type = ?", userId, logType)
} }
if modelName != "" { if modelName != "" {
tx = tx.Where("model_name = ?", modelName) tx = tx.Where("model_name = ?", modelName)
@@ -128,17 +128,17 @@ func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int
} }
func SearchAllLogs(keyword string) (logs []*Log, err error) { func SearchAllLogs(keyword string) (logs []*Log, err error) {
err = DB.Where("type = ? or content LIKE ?", keyword, keyword+"%").Order("id desc").Limit(config.MaxRecentItems).Find(&logs).Error err = LOG_DB.Where("type = ? or content LIKE ?", keyword, keyword+"%").Order("id desc").Limit(config.MaxRecentItems).Find(&logs).Error
return logs, err return logs, err
} }
func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) { func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
err = DB.Where("user_id = ? and type = ?", userId, keyword).Order("id desc").Limit(config.MaxRecentItems).Omit("id").Find(&logs).Error err = LOG_DB.Where("user_id = ? and type = ?", userId, keyword).Order("id desc").Limit(config.MaxRecentItems).Omit("id").Find(&logs).Error
return logs, err return logs, err
} }
func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int) { func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int64) {
tx := DB.Table("logs").Select("ifnull(sum(quota),0)") tx := LOG_DB.Table("logs").Select("ifnull(sum(quota),0)")
if username != "" { if username != "" {
tx = tx.Where("username = ?", username) tx = tx.Where("username = ?", username)
} }
@@ -162,7 +162,7 @@ func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelNa
} }
func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) { func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) {
tx := DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)") tx := LOG_DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)")
if username != "" { if username != "" {
tx = tx.Where("username = ?", username) tx = tx.Where("username = ?", username)
} }
@@ -183,7 +183,7 @@ func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelNa
} }
func DeleteOldLog(targetTimestamp int64) (int64, error) { func DeleteOldLog(targetTimestamp int64) (int64, error) {
result := DB.Where("created_at < ?", targetTimestamp).Delete(&Log{}) result := LOG_DB.Where("created_at < ?", targetTimestamp).Delete(&Log{})
return result.RowsAffected, result.Error return result.RowsAffected, result.Error
} }
@@ -207,7 +207,7 @@ func SearchLogsByDayAndModel(userId, start, end int) (LogStatistics []*LogStatis
groupSelect = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day" groupSelect = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day"
} }
err = DB.Raw(` err = LOG_DB.Raw(`
SELECT `+groupSelect+`, SELECT `+groupSelect+`,
model_name, count(1) as request_count, model_name, count(1) as request_count,
sum(quota) as quota, sum(quota) as quota,
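
Every read and write against the logs table now goes through LOG_DB instead of DB, which is what lets logs live in a separate database. A hypothetical call site, assuming this model package; the IDs, token counts, and quota below are invented for illustration.

package main

import (
	"context"

	"github.com/songquanpeng/one-api/model"
)

func main() {
	// quota is now an int64; the record is written through LOG_DB.
	ctx := context.Background()
	model.RecordConsumeLog(ctx, 1, 2, 128, 256,
		"gpt-3.5-turbo", "demo-token", int64(4096), "demo request")
}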

View File

@@ -2,22 +2,24 @@ package model
 import (
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/env"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 	"gorm.io/driver/mysql"
 	"gorm.io/driver/postgres"
 	"gorm.io/driver/sqlite"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 	"os"
 	"strings"
 	"time"
 )
 var DB *gorm.DB
+var LOG_DB *gorm.DB
-func createRootAccountIfNeed() error {
+func CreateRootAccountIfNeed() error {
 	var user User
 	//if user.Status != util.UserStatusEnabled {
 	if err := DB.First(&user).Error; err != nil {
@@ -40,9 +42,9 @@ func createRootAccountIfNeed() error {
 	return nil
 }
-func chooseDB() (*gorm.DB, error) {
-	if os.Getenv("SQL_DSN") != "" {
-		dsn := os.Getenv("SQL_DSN")
+func chooseDB(envName string) (*gorm.DB, error) {
+	if os.Getenv(envName) != "" {
+		dsn := os.Getenv(envName)
 		if strings.HasPrefix(dsn, "postgres://") {
 			// Use PostgreSQL
 			logger.SysLog("using PostgreSQL as database")
@@ -56,6 +58,7 @@ func chooseDB() (*gorm.DB, error) {
 		}
 		// Use MySQL
 		logger.SysLog("using MySQL as database")
+		common.UsingMySQL = true
 		return gorm.Open(mysql.Open(dsn), &gorm.Config{
 			PrepareStmt: true, // precompile SQL
 		})
@@ -69,67 +72,78 @@ func chooseDB() (*gorm.DB, error) {
 	})
 }
-func InitDB() (err error) {
-	db, err := chooseDB()
+func InitDB(envName string) (db *gorm.DB, err error) {
+	db, err = chooseDB(envName)
 	if err == nil {
-		if config.DebugEnabled {
+		if config.DebugSQLEnabled {
 			db = db.Debug()
 		}
-		DB = db
-		sqlDB, err := DB.DB()
+		sqlDB, err := db.DB()
 		if err != nil {
-			return err
+			return nil, err
 		}
-		sqlDB.SetMaxIdleConns(helper.GetOrDefaultEnvInt("SQL_MAX_IDLE_CONNS", 100))
-		sqlDB.SetMaxOpenConns(helper.GetOrDefaultEnvInt("SQL_MAX_OPEN_CONNS", 1000))
-		sqlDB.SetConnMaxLifetime(time.Second * time.Duration(helper.GetOrDefaultEnvInt("SQL_MAX_LIFETIME", 60)))
+		sqlDB.SetMaxIdleConns(env.Int("SQL_MAX_IDLE_CONNS", 100))
+		sqlDB.SetMaxOpenConns(env.Int("SQL_MAX_OPEN_CONNS", 1000))
+		sqlDB.SetConnMaxLifetime(time.Second * time.Duration(env.Int("SQL_MAX_LIFETIME", 60)))
 		if !config.IsMasterNode {
-			return nil
+			return db, err
+		}
+		if common.UsingMySQL {
+			_, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
 		}
 		logger.SysLog("database migration started")
 		err = db.AutoMigrate(&Channel{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&Token{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&User{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&Option{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&Redemption{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&Ability{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		err = db.AutoMigrate(&Log{})
 		if err != nil {
-			return err
+			return nil, err
 		}
 		logger.SysLog("database migrated")
-		err = createRootAccountIfNeed()
-		return err
+		return db, err
 	} else {
 		logger.FatalLog(err)
 	}
-	return err
+	return db, err
 }
-func CloseDB() error {
-	sqlDB, err := DB.DB()
+func closeDB(db *gorm.DB) error {
+	sqlDB, err := db.DB()
 	if err != nil {
 		return err
 	}
 	err = sqlDB.Close()
 	return err
 }
+func CloseDB() error {
+	if LOG_DB != DB {
+		err := closeDB(LOG_DB)
+		if err != nil {
			return err
+		}
+	}
+	return closeDB(DB)
+}
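
InitDB now takes the name of the environment variable holding the DSN and returns the connection, and CloseDB closes the log connection only when it differs from the primary one. A sketch of how the pieces plausibly compose at startup; the real wiring lives in main.go, which is outside this diff, so the LOG_SQL_DSN variable name and the fallback shown here are assumptions. This is a fragment, not a full program.

var err error
model.DB, err = model.InitDB("SQL_DSN")
if err != nil {
	logger.FatalLog(err)
}
if os.Getenv("LOG_SQL_DSN") != "" {
	model.LOG_DB, err = model.InitDB("LOG_SQL_DSN") // dedicated database for the logs table
	if err != nil {
		logger.FatalLog(err)
	}
} else {
	model.LOG_DB = model.DB // reuse the primary connection
}
defer func() { _ = model.CloseDB() }() // closes LOG_DB first when it differs from DB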

View File

@@ -1,9 +1,9 @@
 package model
 import (
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"strconv"
 	"strings"
 	"time"
@@ -57,15 +57,18 @@ func InitOptionMap() {
 	config.OptionMap["WeChatServerAddress"] = ""
 	config.OptionMap["WeChatServerToken"] = ""
 	config.OptionMap["WeChatAccountQRCodeImageURL"] = ""
+	config.OptionMap["MessagePusherAddress"] = ""
+	config.OptionMap["MessagePusherToken"] = ""
 	config.OptionMap["TurnstileSiteKey"] = ""
 	config.OptionMap["TurnstileSecretKey"] = ""
-	config.OptionMap["QuotaForNewUser"] = strconv.Itoa(config.QuotaForNewUser)
-	config.OptionMap["QuotaForInviter"] = strconv.Itoa(config.QuotaForInviter)
-	config.OptionMap["QuotaForInvitee"] = strconv.Itoa(config.QuotaForInvitee)
-	config.OptionMap["QuotaRemindThreshold"] = strconv.Itoa(config.QuotaRemindThreshold)
-	config.OptionMap["PreConsumedQuota"] = strconv.Itoa(config.PreConsumedQuota)
+	config.OptionMap["QuotaForNewUser"] = strconv.FormatInt(config.QuotaForNewUser, 10)
+	config.OptionMap["QuotaForInviter"] = strconv.FormatInt(config.QuotaForInviter, 10)
+	config.OptionMap["QuotaForInvitee"] = strconv.FormatInt(config.QuotaForInvitee, 10)
+	config.OptionMap["QuotaRemindThreshold"] = strconv.FormatInt(config.QuotaRemindThreshold, 10)
+	config.OptionMap["PreConsumedQuota"] = strconv.FormatInt(config.PreConsumedQuota, 10)
 	config.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
 	config.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
+	config.OptionMap["CompletionRatio"] = common.CompletionRatio2JSONString()
 	config.OptionMap["TopUpLink"] = config.TopUpLink
 	config.OptionMap["ChatLink"] = config.ChatLink
 	config.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(config.QuotaPerUnit, 'f', -1, 64)
@@ -78,6 +81,9 @@ func InitOptionMap() {
 func loadOptionsFromDatabase() {
 	options, _ := AllOption()
 	for _, option := range options {
+		if option.Key == "ModelRatio" {
+			option.Value = common.AddNewMissingRatio(option.Value)
+		}
 		err := updateOptionMap(option.Key, option.Value)
 		if err != nil {
 			logger.SysError("failed to update option map: " + err.Error())
@@ -178,26 +184,32 @@ func updateOptionMap(key string, value string) (err error) {
 		config.WeChatServerToken = value
 	case "WeChatAccountQRCodeImageURL":
 		config.WeChatAccountQRCodeImageURL = value
+	case "MessagePusherAddress":
+		config.MessagePusherAddress = value
+	case "MessagePusherToken":
+		config.MessagePusherToken = value
 	case "TurnstileSiteKey":
 		config.TurnstileSiteKey = value
 	case "TurnstileSecretKey":
 		config.TurnstileSecretKey = value
 	case "QuotaForNewUser":
-		config.QuotaForNewUser, _ = strconv.Atoi(value)
+		config.QuotaForNewUser, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaForInviter":
-		config.QuotaForInviter, _ = strconv.Atoi(value)
+		config.QuotaForInviter, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaForInvitee":
-		config.QuotaForInvitee, _ = strconv.Atoi(value)
+		config.QuotaForInvitee, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaRemindThreshold":
-		config.QuotaRemindThreshold, _ = strconv.Atoi(value)
+		config.QuotaRemindThreshold, _ = strconv.ParseInt(value, 10, 64)
 	case "PreConsumedQuota":
-		config.PreConsumedQuota, _ = strconv.Atoi(value)
+		config.PreConsumedQuota, _ = strconv.ParseInt(value, 10, 64)
 	case "RetryTimes":
 		config.RetryTimes, _ = strconv.Atoi(value)
 	case "ModelRatio":
 		err = common.UpdateModelRatioByJSONString(value)
 	case "GroupRatio":
 		err = common.UpdateGroupRatioByJSONString(value)
+	case "CompletionRatio":
+		err = common.UpdateCompletionRatioByJSONString(value)
 	case "TopUpLink":
 		config.TopUpLink = value
 	case "ChatLink":

View File

@@ -3,9 +3,9 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/helper"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/helper"
 )
 type Redemption struct {
@@ -14,7 +14,7 @@ type Redemption struct {
 	Key          string `json:"key" gorm:"type:char(32);uniqueIndex"`
 	Status       int    `json:"status" gorm:"default:1"`
 	Name         string `json:"name" gorm:"index"`
-	Quota        int    `json:"quota" gorm:"default:100"`
+	Quota        int64  `json:"quota" gorm:"default:100"`
 	CreatedTime  int64  `json:"created_time" gorm:"bigint"`
 	RedeemedTime int64  `json:"redeemed_time" gorm:"bigint"`
 	Count        int    `json:"count" gorm:"-:all"` // only for api request
@@ -42,7 +42,7 @@ func GetRedemptionById(id int) (*Redemption, error) {
 	return &redemption, err
 }
-func Redeem(key string, userId int) (quota int, err error) {
+func Redeem(key string, userId int) (quota int64, err error) {
 	if key == "" {
 		return 0, errors.New("未提供兑换码")
 	}

View File

@@ -3,11 +3,12 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/message"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 )
 type Token struct {
@@ -19,9 +20,9 @@ type Token struct {
 	CreatedTime    int64  `json:"created_time" gorm:"bigint"`
 	AccessedTime   int64  `json:"accessed_time" gorm:"bigint"`
 	ExpiredTime    int64  `json:"expired_time" gorm:"bigint;default:-1"` // -1 means never expired
-	RemainQuota    int    `json:"remain_quota" gorm:"default:0"`
+	RemainQuota    int64  `json:"remain_quota" gorm:"default:0"`
 	UnlimitedQuota bool   `json:"unlimited_quota" gorm:"default:false"`
-	UsedQuota      int    `json:"used_quota" gorm:"default:0"` // used quota
+	UsedQuota      int64  `json:"used_quota" gorm:"default:0"` // used quota
 }
 func GetAllUserTokens(userId int, startIdx int, num int) ([]*Token, error) {
@@ -137,7 +138,7 @@ func DeleteTokenById(id int, userId int) (err error) {
 	return token.Delete()
 }
-func IncreaseTokenQuota(id int, quota int) (err error) {
+func IncreaseTokenQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -148,7 +149,7 @@ func IncreaseTokenQuota(id int, quota int) (err error) {
 	return increaseTokenQuota(id, quota)
 }
-func increaseTokenQuota(id int, quota int) (err error) {
+func increaseTokenQuota(id int, quota int64) (err error) {
 	err = DB.Model(&Token{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"remain_quota": gorm.Expr("remain_quota + ?", quota),
@@ -159,7 +160,7 @@ func increaseTokenQuota(id int, quota int) (err error) {
 	return err
 }
-func DecreaseTokenQuota(id int, quota int) (err error) {
+func DecreaseTokenQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -170,7 +171,7 @@ func DecreaseTokenQuota(id int, quota int) (err error) {
 	return decreaseTokenQuota(id, quota)
 }
-func decreaseTokenQuota(id int, quota int) (err error) {
+func decreaseTokenQuota(id int, quota int64) (err error) {
 	err = DB.Model(&Token{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"remain_quota": gorm.Expr("remain_quota - ?", quota),
@@ -181,7 +182,7 @@ func decreaseTokenQuota(id int, quota int) (err error) {
 	return err
 }
-func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
+func PreConsumeTokenQuota(tokenId int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -213,7 +214,7 @@ func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
 	}
 	if email != "" {
 		topUpLink := fmt.Sprintf("%s/topup", config.ServerAddress)
-		err = common.SendEmail(prompt, email,
+		err = message.SendEmail(prompt, email,
 			fmt.Sprintf("%s当前剩余额度为 %d为了不影响您的使用请及时充值。<br/>充值链接:<a href='%s'>%s</a>", prompt, userQuota, topUpLink, topUpLink))
 		if err != nil {
 			logger.SysError("failed to send email" + err.Error())
@@ -231,7 +232,7 @@ func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
 	return err
 }
-func PostConsumeTokenQuota(tokenId int, quota int) (err error) {
+func PostConsumeTokenQuota(tokenId int, quota int64) (err error) {
 	token, err := GetTokenById(tokenId)
 	if quota > 0 {
 		err = DecreaseUserQuota(token.UserId, quota)
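
PreConsumeTokenQuota reserves quota before the upstream call and PostConsumeTokenQuota settles the difference afterwards. A hypothetical relay-side flow, assuming the helpers in this diff; the token id and both quota figures are invented, and this is a fragment rather than a full program.

func settle(tokenId int) error {
	preConsumed := int64(500)
	if err := model.PreConsumeTokenQuota(tokenId, preConsumed); err != nil {
		return err // rejected: expired token or not enough quota
	}
	// ... relay the request upstream and count the real usage ...
	actual := int64(730)
	// a positive delta deducts the remainder; a negative one refunds
	return model.PostConsumeTokenQuota(tokenId, actual-preConsumed)
}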

View File

@@ -3,11 +3,12 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/blacklist"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 	"strings"
 )
@@ -25,8 +26,8 @@ type User struct {
 	WeChatId         string `json:"wechat_id" gorm:"column:wechat_id;index"`
 	VerificationCode string `json:"verification_code" gorm:"-:all"` // this field is only for Email verification, don't save it to database!
 	AccessToken      string `json:"access_token" gorm:"type:char(32);column:access_token;uniqueIndex"` // this token is for system management
-	Quota            int    `json:"quota" gorm:"type:int;default:0"`
-	UsedQuota        int    `json:"used_quota" gorm:"type:int;default:0;column:used_quota"` // used quota
+	Quota            int64  `json:"quota" gorm:"type:int;default:0"`
+	UsedQuota        int64  `json:"used_quota" gorm:"type:int;default:0;column:used_quota"` // used quota
 	RequestCount     int    `json:"request_count" gorm:"type:int;default:0;"` // request number
 	Group            string `json:"group" gorm:"type:varchar(32);default:'default'"`
 	AffCode          string `json:"aff_code" gorm:"type:varchar(32);column:aff_code;uniqueIndex"`
@@ -40,7 +41,7 @@ func GetMaxUserId() int {
 }
 func GetAllUsers(startIdx int, num int) (users []*User, err error) {
-	err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("password").Find(&users).Error
+	err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("password").Where("status != ?", common.UserStatusDeleted).Find(&users).Error
 	return users, err
 }
@@ -123,6 +124,11 @@ func (user *User) Update(updatePassword bool) error {
 			return err
 		}
 	}
+	if user.Status == common.UserStatusDisabled {
+		blacklist.BanUser(user.Id)
+	} else if user.Status == common.UserStatusEnabled {
+		blacklist.UnbanUser(user.Id)
+	}
 	err = DB.Model(user).Updates(user).Error
 	return err
 }
@@ -131,7 +137,10 @@ func (user *User) Delete() error {
 	if user.Id == 0 {
 		return errors.New("id 为空!")
 	}
-	err := DB.Delete(user).Error
+	blacklist.BanUser(user.Id)
+	user.Username = fmt.Sprintf("deleted_%s", helper.GetUUID())
+	user.Status = common.UserStatusDeleted
+	err := DB.Model(user).Updates(user).Error
 	return err
 }
@@ -265,12 +274,12 @@ func ValidateAccessToken(token string) (user *User) {
 	return nil
 }
-func GetUserQuota(id int) (quota int, err error) {
+func GetUserQuota(id int) (quota int64, err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Select("quota").Find(&quota).Error
 	return quota, err
 }
-func GetUserUsedQuota(id int) (quota int, err error) {
+func GetUserUsedQuota(id int) (quota int64, err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Select("used_quota").Find(&quota).Error
 	return quota, err
 }
@@ -290,7 +299,7 @@ func GetUserGroup(id int) (group string, err error) {
 	return group, err
 }
-func IncreaseUserQuota(id int, quota int) (err error) {
+func IncreaseUserQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -301,12 +310,12 @@ func IncreaseUserQuota(id int, quota int) (err error) {
 	return increaseUserQuota(id, quota)
 }
-func increaseUserQuota(id int, quota int) (err error) {
+func increaseUserQuota(id int, quota int64) (err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Update("quota", gorm.Expr("quota + ?", quota)).Error
 	return err
 }
-func DecreaseUserQuota(id int, quota int) (err error) {
+func DecreaseUserQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -317,7 +326,7 @@ func DecreaseUserQuota(id int, quota int) (err error) {
 	return decreaseUserQuota(id, quota)
 }
-func decreaseUserQuota(id int, quota int) (err error) {
+func decreaseUserQuota(id int, quota int64) (err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Update("quota", gorm.Expr("quota - ?", quota)).Error
 	return err
 }
@@ -327,7 +336,7 @@ func GetRootUserEmail() (email string) {
 	return email
 }
-func UpdateUserUsedQuotaAndRequestCount(id int, quota int) {
+func UpdateUserUsedQuotaAndRequestCount(id int, quota int64) {
 	if config.BatchUpdateEnabled {
 		addNewRecord(BatchUpdateTypeUsedQuota, id, quota)
 		addNewRecord(BatchUpdateTypeRequestCount, id, 1)
@@ -336,7 +345,7 @@ func UpdateUserUsedQuotaAndRequestCount(id int, quota int) {
 	updateUserUsedQuotaAndRequestCount(id, quota, 1)
 }
-func updateUserUsedQuotaAndRequestCount(id int, quota int, count int) {
+func updateUserUsedQuotaAndRequestCount(id int, quota int64, count int) {
 	err := DB.Model(&User{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"used_quota": gorm.Expr("used_quota + ?", quota),
@@ -348,7 +357,7 @@ func updateUserUsedQuotaAndRequestCount(id int, quota int, count int) {
 	}
 }
-func updateUserUsedQuota(id int, quota int) {
+func updateUserUsedQuota(id int, quota int64) {
 	err := DB.Model(&User{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"used_quota": gorm.Expr("used_quota + ?", quota),

View File

@@ -1,8 +1,8 @@
 package model
 import (
-	"one-api/common/config"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"sync"
 	"time"
 )
@@ -16,12 +16,12 @@ const (
 	BatchUpdateTypeCount // if you add a new type, you need to add a new map and a new lock
 )
-var batchUpdateStores []map[int]int
+var batchUpdateStores []map[int]int64
 var batchUpdateLocks []sync.Mutex
 func init() {
 	for i := 0; i < BatchUpdateTypeCount; i++ {
-		batchUpdateStores = append(batchUpdateStores, make(map[int]int))
+		batchUpdateStores = append(batchUpdateStores, make(map[int]int64))
 		batchUpdateLocks = append(batchUpdateLocks, sync.Mutex{})
 	}
 }
@@ -35,7 +35,7 @@ func InitBatchUpdater() {
 	}()
 }
-func addNewRecord(type_ int, id int, value int) {
+func addNewRecord(type_ int, id int, value int64) {
 	batchUpdateLocks[type_].Lock()
 	defer batchUpdateLocks[type_].Unlock()
 	if _, ok := batchUpdateStores[type_][id]; !ok {
@@ -50,7 +50,7 @@ func batchUpdate() {
 	for i := 0; i < BatchUpdateTypeCount; i++ {
 		batchUpdateLocks[i].Lock()
 		store := batchUpdateStores[i]
-		batchUpdateStores[i] = make(map[int]int)
+		batchUpdateStores[i] = make(map[int]int64)
 		batchUpdateLocks[i].Unlock()
 		// TODO: maybe we can combine updates with same key?
 		for key, value := range store {
@@ -68,7 +68,7 @@ func batchUpdate() {
 			case BatchUpdateTypeUsedQuota:
 				updateUserUsedQuota(key, value)
 			case BatchUpdateTypeRequestCount:
-				updateUserRequestCount(key, value)
+				updateUserRequestCount(key, int(value))
 			case BatchUpdateTypeChannelUsedQuota:
 				updateChannelUsedQuota(key, value)
 			}
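
With BatchUpdateEnabled, per-id deltas accumulate in the map[int]int64 stores above and are flushed on a timer as a single UPDATE per id, trading write volume for a short staleness window. A compact, self-contained model of that accumulate-and-flush cycle:

package main

import "fmt"

func main() {
	store := map[int]int64{}
	add := func(id int, v int64) { store[id] += v }

	add(7, 120)
	add(7, 80) // coalesces with the previous delta for id 7
	add(9, 33)

	for id, delta := range store {
		// the real flush issues e.g.
		// UPDATE users SET used_quota = used_quota + ? WHERE id = ?
		fmt.Printf("flush id=%d delta=%d\n", id, delta)
	}
}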

monitor/channel.go Normal file
View File

@@ -0,0 +1,55 @@
+package monitor
+import (
+	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/message"
+	"github.com/songquanpeng/one-api/model"
+)
+func notifyRootUser(subject string, content string) {
+	if config.MessagePusherAddress != "" {
+		err := message.SendMessage(subject, content, content)
+		if err != nil {
+			logger.SysError(fmt.Sprintf("failed to send message: %s", err.Error()))
+		} else {
+			return
+		}
+	}
+	if config.RootUserEmail == "" {
+		config.RootUserEmail = model.GetRootUserEmail()
+	}
+	err := message.SendEmail(subject, config.RootUserEmail, content)
+	if err != nil {
+		logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
+	}
+}
+// DisableChannel disable & notify
+func DisableChannel(channelId int, channelName string, reason string) {
+	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
+	logger.SysLog(fmt.Sprintf("channel #%d has been disabled: %s", channelId, reason))
+	subject := fmt.Sprintf("通道「%s」#%d已被禁用", channelName, channelId)
+	content := fmt.Sprintf("通道「%s」#%d已被禁用原因%s", channelName, channelId, reason)
+	notifyRootUser(subject, content)
+}
+func MetricDisableChannel(channelId int, successRate float64) {
+	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
+	logger.SysLog(fmt.Sprintf("channel #%d has been disabled due to low success rate: %.2f", channelId, successRate*100))
+	subject := fmt.Sprintf("通道 #%d 已被禁用", channelId)
+	content := fmt.Sprintf("该渠道在最近 %d 次调用中成功率为 %.2f%%,低于阈值 %.2f%%,因此被系统自动禁用。",
+		config.MetricQueueSize, successRate*100, config.MetricSuccessRateThreshold*100)
+	notifyRootUser(subject, content)
+}
+// EnableChannel enable & notify
+func EnableChannel(channelId int, channelName string) {
+	model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled)
+	logger.SysLog(fmt.Sprintf("channel #%d has been enabled", channelId))
+	subject := fmt.Sprintf("通道「%s」#%d已被启用", channelName, channelId)
+	content := fmt.Sprintf("通道「%s」#%d已被启用", channelName, channelId)
+	notifyRootUser(subject, content)
+}
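
DisableChannel is the disable-and-notify path behind the alerting commits: it tries the message pusher first and falls back to the root user's email. A hypothetical caller in the relay error path; util.ShouldDisableChannel appears in the commit history but neither its body nor its exact signature is part of this diff, so both are assumptions here.

// fragment: assumes the relay's error type and channel metadata
if util.ShouldDisableChannel(&relayErr.Error, resp.StatusCode) {
	monitor.DisableChannel(channelId, channelName, relayErr.Error.Message)
}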

monitor/metric.go Normal file
View File

@@ -0,0 +1,79 @@
+package monitor
+import (
+	"github.com/songquanpeng/one-api/common/config"
+)
+var store = make(map[int][]bool)
+var metricSuccessChan = make(chan int, config.MetricSuccessChanSize)
+var metricFailChan = make(chan int, config.MetricFailChanSize)
+func consumeSuccess(channelId int) {
+	if len(store[channelId]) > config.MetricQueueSize {
+		store[channelId] = store[channelId][1:]
+	}
+	store[channelId] = append(store[channelId], true)
+}
+func consumeFail(channelId int) (bool, float64) {
+	if len(store[channelId]) > config.MetricQueueSize {
+		store[channelId] = store[channelId][1:]
+	}
+	store[channelId] = append(store[channelId], false)
+	successCount := 0
+	for _, success := range store[channelId] {
+		if success {
+			successCount++
+		}
+	}
+	successRate := float64(successCount) / float64(len(store[channelId]))
+	if len(store[channelId]) < config.MetricQueueSize {
+		return false, successRate
+	}
+	if successRate < config.MetricSuccessRateThreshold {
+		store[channelId] = make([]bool, 0)
+		return true, successRate
+	}
+	return false, successRate
+}
+func metricSuccessConsumer() {
+	for {
+		select {
+		case channelId := <-metricSuccessChan:
+			consumeSuccess(channelId)
+		}
+	}
+}
+func metricFailConsumer() {
+	for {
+		select {
+		case channelId := <-metricFailChan:
+			disable, successRate := consumeFail(channelId)
+			if disable {
+				go MetricDisableChannel(channelId, successRate)
+			}
+		}
+	}
+}
+func init() {
+	if config.EnableMetric {
+		go metricSuccessConsumer()
+		go metricFailConsumer()
+	}
+}
+func Emit(channelId int, success bool) {
+	if !config.EnableMetric {
+		return
+	}
+	go func() {
+		if success {
+			metricSuccessChan <- channelId
+		} else {
+			metricFailChan <- channelId
+		}
+	}()
+}
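
consumeFail keeps a sliding window of the last MetricQueueSize outcomes per channel and trips only once the window is full and the success rate drops below the threshold. A self-contained model of that window logic, with the queue size and threshold chosen arbitrarily for the demo:

package main

import "fmt"

func main() {
	const queueSize, threshold = 10, 0.8
	var window []bool
	push := func(ok bool) (disable bool, rate float64) {
		if len(window) > queueSize {
			window = window[1:]
		}
		window = append(window, ok)
		succ := 0
		for _, s := range window {
			if s {
				succ++
			}
		}
		rate = float64(succ) / float64(len(window))
		// act only once the window holds enough samples
		return len(window) >= queueSize && rate < threshold, rate
	}
	for i := 0; i < 12; i++ {
		disable, rate := push(i%3 != 0) // every third call fails
		fmt.Printf("call %2d: rate=%.2f disable=%v\n", i, rate, disable)
	}
}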

View File

@@ -0,0 +1,8 @@
+package ai360
+var ModelList = []string{
+	"360GPT_S2_V9",
+	"embedding-bert-512-v1",
+	"embedding_s1_v1",
+	"semantic_similarity_s1_v1",
+}

View File

@@ -1,22 +1,60 @@
 package aiproxy
 import (
+	"errors"
+	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/relay/channel"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
-	"one-api/relay/channel/openai"
 )
 type Adaptor struct {
 }
-func (a *Adaptor) Auth(c *gin.Context) error {
+func (a *Adaptor) Init(meta *util.RelayMeta) {
+}
+func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
+	return fmt.Sprintf("%s/api/library/ask", meta.BaseURL), nil
+}
+func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
+	channel.SetupCommonRequestHeader(c, req, meta)
+	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
 	return nil
 }
-func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
-	return nil, nil
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+	aiProxyLibraryRequest := ConvertRequest(*request)
+	aiProxyLibraryRequest.LibraryId = c.GetString(common.ConfigKeyLibraryID)
+	return aiProxyLibraryRequest, nil
 }
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
-	return nil, nil, nil
+func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
+	return channel.DoRequestHelper(a, c, meta, requestBody)
+}
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if meta.IsStream {
+		err, usage = StreamHandler(c, resp)
+	} else {
+		err, usage = Handler(c, resp)
+	}
+	return
+}
+func (a *Adaptor) GetModelList() []string {
+	return ModelList
+}
+func (a *Adaptor) GetChannelName() string {
+	return "aiproxy"
 }
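
Each provider now satisfies one adaptor contract (Init, GetRequestURL, SetupRequestHeader, ConvertRequest, DoRequest, DoResponse, GetModelList, GetChannelName), which is what the aiproxy, ali, and anthropic files below all implement. The interface itself is defined in relay/channel and is not shown in this diff, so this sketch is inferred from the method sets and may differ in detail from the real declaration:

// inferred from the adaptor method sets in this compare view
type Adaptor interface {
	Init(meta *util.RelayMeta)
	GetRequestURL(meta *util.RelayMeta) (string, error)
	SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
	DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (*model.Usage, *model.ErrorWithStatusCode)
	GetModelList() []string
	GetChannelName() string
}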

View File

@@ -0,0 +1,9 @@
+package aiproxy
+import "github.com/songquanpeng/one-api/relay/channel/openai"
+var ModelList = []string{""}
+func init() {
+	ModelList = openai.ModelList
+}

View File

@@ -5,20 +5,21 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/helper"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
 	"strconv"
 	"strings"
 )
 // https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答
-func ConvertRequest(request openai.GeneralOpenAIRequest) *LibraryRequest {
+func ConvertRequest(request model.GeneralOpenAIRequest) *LibraryRequest {
 	query := ""
 	if len(request.Messages) != 0 {
 		query = request.Messages[len(request.Messages)-1].StringContent()
@@ -45,14 +46,14 @@ func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextRespon
 	content := response.Answer + aiProxyDocuments2Markdown(response.Documents)
 	choice := openai.TextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: model.Message{
 			Role:    "assistant",
 			Content: content,
 		},
 		FinishReason: "stop",
 	}
 	fullTextResponse := openai.TextResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Choices: []openai.TextResponseChoice{choice},
@@ -65,7 +66,7 @@ func documentsAIProxyLibrary(documents []LibraryDocument) *openai.ChatCompletion
 	choice.Delta.Content = aiProxyDocuments2Markdown(documents)
 	choice.FinishReason = &constant.StopFinishReason
 	return &openai.ChatCompletionsStreamResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   "",
@@ -77,7 +78,7 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = response.Content
 	return &openai.ChatCompletionsStreamResponse{
-		Id:      helper.GetUUID(),
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   response.Model,
@@ -85,8 +86,8 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 	}
 }
-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var usage openai.Usage
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	var usage model.Usage
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -157,7 +158,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 	return nil, &usage
 }
-func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var AIProxyLibraryResponse LibraryResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -172,8 +173,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if AIProxyLibraryResponse.ErrCode != 0 {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: AIProxyLibraryResponse.Message,
 				Type:    strconv.Itoa(AIProxyLibraryResponse.ErrCode),
 				Code:    AIProxyLibraryResponse.ErrCode,
@@ -189,5 +190,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 	c.Writer.Header().Set("Content-Type", "application/json")
 	c.Writer.WriteHeader(resp.StatusCode)
 	_, err = c.Writer.Write(jsonResponse)
+	if err != nil {
+		return openai.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError), nil
+	}
 	return nil, &fullTextResponse.Usage
 }

View File

@@ -1,22 +1,86 @@
 package ali
 import (
+	"errors"
+	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/relay/channel"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
-	"one-api/relay/channel/openai"
 )
+// https://help.aliyun.com/zh/dashscope/developer-reference/api-details
 type Adaptor struct {
 }
-func (a *Adaptor) Auth(c *gin.Context) error {
+func (a *Adaptor) Init(meta *util.RelayMeta) {
+}
+func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
+	fullRequestURL := fmt.Sprintf("%s/api/v1/services/aigc/text-generation/generation", meta.BaseURL)
+	if meta.Mode == constant.RelayModeEmbeddings {
+		fullRequestURL = fmt.Sprintf("%s/api/v1/services/embeddings/text-embedding/text-embedding", meta.BaseURL)
+	}
+	return fullRequestURL, nil
+}
+func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
+	channel.SetupCommonRequestHeader(c, req, meta)
+	if meta.IsStream {
+		req.Header.Set("Accept", "text/event-stream")
+	}
+	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
+	if meta.IsStream {
+		req.Header.Set("X-DashScope-SSE", "enable")
+	}
+	if c.GetString(common.ConfigKeyPlugin) != "" {
+		req.Header.Set("X-DashScope-Plugin", c.GetString(common.ConfigKeyPlugin))
+	}
 	return nil
 }
-func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
-	return nil, nil
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+	switch relayMode {
+	case constant.RelayModeEmbeddings:
+		baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
+		return baiduEmbeddingRequest, nil
+	default:
+		baiduRequest := ConvertRequest(*request)
+		return baiduRequest, nil
+	}
 }
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
-	return nil, nil, nil
+func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
+	return channel.DoRequestHelper(a, c, meta, requestBody)
+}
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if meta.IsStream {
+		err, usage = StreamHandler(c, resp)
+	} else {
+		switch meta.Mode {
+		case constant.RelayModeEmbeddings:
+			err, usage = EmbeddingHandler(c, resp)
+		default:
+			err, usage = Handler(c, resp)
+		}
+	}
+	return
+}
+func (a *Adaptor) GetModelList() []string {
+	return ModelList
+}
+func (a *Adaptor) GetChannelName() string {
+	return "ali"
 }

View File

@@ -0,0 +1,6 @@
+package ali
+var ModelList = []string{
+	"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext",
+	"text-embedding-v1",
+}

View File

@@ -4,12 +4,13 @@ import (
 	"bufio"
 	"encoding/json"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/model"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/helper"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
 	"strings"
 )
@@ -17,7 +18,7 @@ import (
 const EnableSearchModelSuffix = "-internet"
-func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
+func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 	messages := make([]Message, 0, len(request.Messages))
 	for i := 0; i < len(request.Messages); i++ {
 		message := request.Messages[i]
@@ -32,6 +33,9 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
 		enableSearch = true
 		aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
 	}
+	if request.TopP >= 1 {
+		request.TopP = 0.9999
+	}
 	return &ChatRequest{
 		Model: aliModel,
 		Input: Input{
@@ -40,11 +44,15 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
 		Parameters: Parameters{
 			EnableSearch:      enableSearch,
 			IncrementalOutput: request.Stream,
+			Seed:              uint64(request.Seed),
+			MaxTokens:         request.MaxTokens,
+			Temperature:       request.Temperature,
+			TopP:              request.TopP,
 		},
 	}
 }
-func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
 	return &EmbeddingRequest{
 		Model: "text-embedding-v1",
 		Input: struct {
@@ -55,7 +63,7 @@ func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequ
 	}
 }
-func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var aliResponse EmbeddingResponse
 	err := json.NewDecoder(resp.Body).Decode(&aliResponse)
 	if err != nil {
@@ -68,8 +76,8 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
 	}
 	if aliResponse.Code != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: aliResponse.Message,
 				Type:    aliResponse.Code,
 				Param:   aliResponse.RequestId,
@@ -95,7 +103,7 @@ func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingR
 		Object: "list",
 		Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Output.Embeddings)),
 		Model:  "text-embedding-v1",
-		Usage:  openai.Usage{TotalTokens: response.Usage.TotalTokens},
+		Usage:  model.Usage{TotalTokens: response.Usage.TotalTokens},
 	}
 	for _, item := range response.Output.Embeddings {
@@ -111,7 +119,7 @@ func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingR
 func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
 	choice := openai.TextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: model.Message{
 			Role:    "assistant",
 			Content: response.Output.Text,
 		},
@@ -122,7 +130,7 @@ func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Choices: []openai.TextResponseChoice{choice},
-		Usage: openai.Usage{
+		Usage: model.Usage{
 			PromptTokens:     response.Usage.InputTokens,
 			CompletionTokens: response.Usage.OutputTokens,
 			TotalTokens:      response.Usage.InputTokens + response.Usage.OutputTokens,
@@ -148,8 +156,8 @@ func streamResponseAli2OpenAI(aliResponse *ChatResponse) *openai.ChatCompletions
 	return &response
 }
-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var usage openai.Usage
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	var usage model.Usage
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -217,7 +225,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 	return nil, &usage
 }
-func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var aliResponse ChatResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -232,8 +240,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if aliResponse.Code != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: aliResponse.Message,
 				Type:    aliResponse.Code,
 				Param:   aliResponse.RequestId,
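
One detail in the ali conversion above is easy to miss: OpenAI-style requests may carry top_p == 1, but the clamp in ConvertRequest implies DashScope only accepts values strictly below 1, so such requests are nudged just under the limit before the sampling parameters (seed, max_tokens, temperature, top_p) are forwarded. The clamp in isolation:

// from ConvertRequest above: keep top_p inside DashScope's
// apparent exclusive upper bound of 1.0
if request.TopP >= 1 {
	request.TopP = 0.9999
}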

View File

@@ -16,6 +16,8 @@ type Parameters struct {
 	Seed              uint64  `json:"seed,omitempty"`
 	EnableSearch      bool    `json:"enable_search,omitempty"`
 	IncrementalOutput bool    `json:"incremental_output,omitempty"`
+	MaxTokens         int     `json:"max_tokens,omitempty"`
+	Temperature       float64 `json:"temperature,omitempty"`
 }
 type ChatRequest struct {

View File

@@ -1,22 +1,63 @@
 package anthropic
 import (
+	"errors"
+	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/relay/channel"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
-	"one-api/relay/channel/openai"
 )
 type Adaptor struct {
 }
-func (a *Adaptor) Auth(c *gin.Context) error {
+func (a *Adaptor) Init(meta *util.RelayMeta) {
+}
+func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
+	return fmt.Sprintf("%s/v1/messages", meta.BaseURL), nil
+}
+func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
+	channel.SetupCommonRequestHeader(c, req, meta)
+	req.Header.Set("x-api-key", meta.APIKey)
+	anthropicVersion := c.Request.Header.Get("anthropic-version")
+	if anthropicVersion == "" {
+		anthropicVersion = "2023-06-01"
+	}
+	req.Header.Set("anthropic-version", anthropicVersion)
+	req.Header.Set("anthropic-beta", "messages-2023-12-15")
 	return nil
 }
-func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
-	return nil, nil
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+	return ConvertRequest(*request), nil
 }
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
-	return nil, nil, nil
+func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
+	return channel.DoRequestHelper(a, c, meta, requestBody)
+}
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if meta.IsStream {
+		err, usage = StreamHandler(c, resp)
+	} else {
+		err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
+	}
+	return
+}
+func (a *Adaptor) GetModelList() []string {
+	return ModelList
+}
+func (a *Adaptor) GetChannelName() string {
+	return "anthropic"
 }

View File

@@ -0,0 +1,8 @@
+package anthropic
+var ModelList = []string{
+	"claude-instant-1.2", "claude-2.0", "claude-2.1",
+	"claude-3-haiku-20240307",
+	"claude-3-sonnet-20240229",
+	"claude-3-opus-20240229",
+}

View File

@@ -5,82 +5,146 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/image"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/model"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/helper"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
 	"strings"
 )

-func stopReasonClaude2OpenAI(reason string) string {
-	switch reason {
+func stopReasonClaude2OpenAI(reason *string) string {
+	if reason == nil {
+		return ""
+	}
+	switch *reason {
+	case "end_turn":
+		return "stop"
 	case "stop_sequence":
 		return "stop"
 	case "max_tokens":
 		return "length"
 	default:
-		return reason
+		return *reason
 	}
 }

-func ConvertRequest(textRequest openai.GeneralOpenAIRequest) *Request {
+func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 	claudeRequest := Request{
-		Model:             textRequest.Model,
-		Prompt:            "",
-		MaxTokensToSample: textRequest.MaxTokens,
-		StopSequences:     nil,
-		Temperature:       textRequest.Temperature,
-		TopP:              textRequest.TopP,
-		Stream:            textRequest.Stream,
+		Model:       textRequest.Model,
+		MaxTokens:   textRequest.MaxTokens,
+		Temperature: textRequest.Temperature,
+		TopP:        textRequest.TopP,
+		Stream:      textRequest.Stream,
 	}
-	if claudeRequest.MaxTokensToSample == 0 {
-		claudeRequest.MaxTokensToSample = 1000000
+	if claudeRequest.MaxTokens == 0 {
+		claudeRequest.MaxTokens = 4096
 	}
-	prompt := ""
+	// legacy model name mapping
+	if claudeRequest.Model == "claude-instant-1" {
+		claudeRequest.Model = "claude-instant-1.1"
+	} else if claudeRequest.Model == "claude-2" {
+		claudeRequest.Model = "claude-2.1"
+	}
 	for _, message := range textRequest.Messages {
-		if message.Role == "user" {
-			prompt += fmt.Sprintf("\n\nHuman: %s", message.Content)
-		} else if message.Role == "assistant" {
-			prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
-		} else if message.Role == "system" {
-			if prompt == "" {
-				prompt = message.StringContent()
-			}
+		if message.Role == "system" && claudeRequest.System == "" {
+			claudeRequest.System = message.StringContent()
+			continue
 		}
+		claudeMessage := Message{
+			Role: message.Role,
+		}
+		var content Content
+		if message.IsStringContent() {
+			content.Type = "text"
+			content.Text = message.StringContent()
+			claudeMessage.Content = append(claudeMessage.Content, content)
+			claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
+			continue
+		}
+		var contents []Content
+		openaiContent := message.ParseContent()
+		for _, part := range openaiContent {
+			var content Content
+			if part.Type == model.ContentTypeText {
+				content.Type = "text"
+				content.Text = part.Text
+			} else if part.Type == model.ContentTypeImageURL {
+				content.Type = "image"
+				content.Source = &ImageSource{
+					Type: "base64",
+				}
+				mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
+				content.Source.MediaType = mimeType
+				content.Source.Data = data
+			}
+			contents = append(contents, content)
+		}
+		claudeMessage.Content = contents
+		claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
 	}
-	prompt += "\n\nAssistant:"
-	claudeRequest.Prompt = prompt
 	return &claudeRequest
 }

-func streamResponseClaude2OpenAI(claudeResponse *Response) *openai.ChatCompletionsStreamResponse {
+// https://docs.anthropic.com/claude/reference/messages-streaming
+func streamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *Response) {
+	var response *Response
+	var responseText string
+	var stopReason string
+	switch claudeResponse.Type {
+	case "message_start":
+		return nil, claudeResponse.Message
+	case "content_block_start":
+		if claudeResponse.ContentBlock != nil {
+			responseText = claudeResponse.ContentBlock.Text
+		}
+	case "content_block_delta":
+		if claudeResponse.Delta != nil {
+			responseText = claudeResponse.Delta.Text
+		}
+	case "message_delta":
+		if claudeResponse.Usage != nil {
+			response = &Response{
+				Usage: *claudeResponse.Usage,
+			}
+		}
+		if claudeResponse.Delta != nil && claudeResponse.Delta.StopReason != nil {
+			stopReason = *claudeResponse.Delta.StopReason
+		}
+	}
 	var choice openai.ChatCompletionsStreamResponseChoice
-	choice.Delta.Content = claudeResponse.Completion
-	finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason)
+	choice.Delta.Content = responseText
+	choice.Delta.Role = "assistant"
+	finishReason := stopReasonClaude2OpenAI(&stopReason)
 	if finishReason != "null" {
 		choice.FinishReason = &finishReason
 	}
-	var response openai.ChatCompletionsStreamResponse
-	response.Object = "chat.completion.chunk"
-	response.Model = claudeResponse.Model
-	response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
-	return &response
+	var openaiResponse openai.ChatCompletionsStreamResponse
+	openaiResponse.Object = "chat.completion.chunk"
+	openaiResponse.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
+	return &openaiResponse, response
 }

 func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
+	var responseText string
+	if len(claudeResponse.Content) > 0 {
+		responseText = claudeResponse.Content[0].Text
+	}
 	choice := openai.TextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: model.Message{
 			Role:    "assistant",
-			Content: strings.TrimPrefix(claudeResponse.Completion, " "),
+			Content: responseText,
 			Name:    nil,
 		},
 		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
 	}
 	fullTextResponse := openai.TextResponse{
-		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
+		Id:      fmt.Sprintf("chatcmpl-%s", claudeResponse.Id),
+		Model:   claudeResponse.Model,
 		Object:  "chat.completion",
 		Created: helper.GetTimestamp(),
 		Choices: []openai.TextResponseChoice{choice},
@@ -88,17 +152,15 @@ func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 	return &fullTextResponse
 }

-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
-	responseText := ""
-	responseId := fmt.Sprintf("chatcmpl-%s", helper.GetUUID())
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	createdTime := helper.GetTimestamp()
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
 			return 0, nil, nil
 		}
-		if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 {
-			return i + 4, data[0:i], nil
+		if i := strings.Index(string(data), "\n"); i >= 0 {
+			return i + 1, data[0:i], nil
 		}
 		if atEOF {
 			return len(data), data, nil
@@ -110,29 +172,45 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 	go func() {
 		for scanner.Scan() {
 			data := scanner.Text()
-			if !strings.HasPrefix(data, "event: completion") {
+			if len(data) < 6 {
 				continue
 			}
-			data = strings.TrimPrefix(data, "event: completion\r\ndata: ")
+			if !strings.HasPrefix(data, "data: ") {
+				continue
+			}
+			data = strings.TrimPrefix(data, "data: ")
 			dataChan <- data
 		}
 		stopChan <- true
 	}()
 	common.SetEventStreamHeaders(c)
+	var usage model.Usage
+	var modelName string
+	var id string
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
 			// some implementations may add \r at the end of data
 			data = strings.TrimSuffix(data, "\r")
-			var claudeResponse Response
+			var claudeResponse StreamResponse
 			err := json.Unmarshal([]byte(data), &claudeResponse)
 			if err != nil {
 				logger.SysError("error unmarshalling stream response: " + err.Error())
 				return true
 			}
-			responseText += claudeResponse.Completion
-			response := streamResponseClaude2OpenAI(&claudeResponse)
-			response.Id = responseId
+			response, meta := streamResponseClaude2OpenAI(&claudeResponse)
+			if meta != nil {
+				usage.PromptTokens += meta.Usage.InputTokens
+				usage.CompletionTokens += meta.Usage.OutputTokens
+				modelName = meta.Model
+				id = fmt.Sprintf("chatcmpl-%s", meta.Id)
+				return true
+			}
+			if response == nil {
+				return true
+			}
+			response.Id = id
+			response.Model = modelName
 			response.Created = createdTime
 			jsonStr, err := json.Marshal(response)
 			if err != nil {
@@ -146,14 +224,11 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 			return false
 		}
 	})
-	err := resp.Body.Close()
-	if err != nil {
-		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
-	}
-	return nil, responseText
+	_ = resp.Body.Close()
+	return nil, &usage
 }

-func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -168,8 +243,8 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if claudeResponse.Error.Type != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: claudeResponse.Error.Message,
 				Type:    claudeResponse.Error.Type,
 				Param:   "",
@@ -179,12 +254,11 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
 		}, nil
 	}
 	fullTextResponse := responseClaude2OpenAI(&claudeResponse)
-	fullTextResponse.Model = model
-	completionTokens := openai.CountTokenText(claudeResponse.Completion, model)
-	usage := openai.Usage{
-		PromptTokens:     promptTokens,
-		CompletionTokens: completionTokens,
-		TotalTokens:      promptTokens + completionTokens,
+	fullTextResponse.Model = modelName
+	usage := model.Usage{
+		PromptTokens:     claudeResponse.Usage.InputTokens,
+		CompletionTokens: claudeResponse.Usage.OutputTokens,
+		TotalTokens:      claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens,
 	}
 	fullTextResponse.Usage = usage
 	jsonResponse, err := json.Marshal(fullTextResponse)
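
The hunks above drop Claude's legacy text-completion prompt concatenation ("\n\nHuman: ... \n\nAssistant:") in favor of the structured Messages API: the system prompt moves to a top-level system field, each turn becomes a list of typed content blocks, and token usage now comes from the upstream response instead of local counting. A minimal, self-contained sketch of the resulting request body, using local trimmed copies of the Request/Message/Content types from this diff rather than the package's own:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the anthropic types shown above, trimmed to what the sketch needs.
type Content struct {
	Type string `json:"type"`
	Text string `json:"text,omitempty"`
}

type Message struct {
	Role    string    `json:"role"`
	Content []Content `json:"content"`
}

type Request struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"`
	System    string    `json:"system,omitempty"`
	MaxTokens int       `json:"max_tokens,omitempty"`
}

func main() {
	// The system message rides in the dedicated "system" field; the user
	// turn becomes a typed text content block.
	req := Request{
		Model:     "claude-3-haiku-20240307",
		System:    "You are a helpful assistant.",
		MaxTokens: 4096, // same default the diff applies when MaxTokens == 0
		Messages: []Message{
			{Role: "user", Content: []Content{{Type: "text", Text: "Hello!"}}},
		},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
}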

View File

@@ -1,19 +1,44 @@
 package anthropic

+// https://docs.anthropic.com/claude/reference/messages_post
+
 type Metadata struct {
 	UserId string `json:"user_id"`
 }

+type ImageSource struct {
+	Type      string `json:"type"`
+	MediaType string `json:"media_type"`
+	Data      string `json:"data"`
+}
+
+type Content struct {
+	Type   string       `json:"type"`
+	Text   string       `json:"text,omitempty"`
+	Source *ImageSource `json:"source,omitempty"`
+}
+
+type Message struct {
+	Role    string    `json:"role"`
+	Content []Content `json:"content"`
+}
+
 type Request struct {
 	Model         string    `json:"model"`
-	Prompt            string   `json:"prompt"`
-	MaxTokensToSample int      `json:"max_tokens_to_sample"`
-	StopSequences     []string `json:"stop_sequences,omitempty"`
-	Temperature       float64  `json:"temperature,omitempty"`
-	TopP              float64  `json:"top_p,omitempty"`
-	TopK              int      `json:"top_k,omitempty"`
+	Messages      []Message `json:"messages"`
+	System        string    `json:"system,omitempty"`
+	MaxTokens     int       `json:"max_tokens,omitempty"`
+	StopSequences []string  `json:"stop_sequences,omitempty"`
+	Stream        bool      `json:"stream,omitempty"`
+	Temperature   float64   `json:"temperature,omitempty"`
+	TopP          float64   `json:"top_p,omitempty"`
+	TopK          int       `json:"top_k,omitempty"`
 	//Metadata    `json:"metadata,omitempty"`
-	Stream bool `json:"stream,omitempty"`
+}
+
+type Usage struct {
+	InputTokens  int `json:"input_tokens"`
+	OutputTokens int `json:"output_tokens"`
 }
@@ -22,8 +47,29 @@ type Error struct {
 }

 type Response struct {
-	Completion string `json:"completion"`
-	StopReason string `json:"stop_reason"`
-	Model      string `json:"model"`
-	Error      Error  `json:"error"`
+	Id           string    `json:"id"`
+	Type         string    `json:"type"`
+	Role         string    `json:"role"`
+	Content      []Content `json:"content"`
+	Model        string    `json:"model"`
+	StopReason   *string   `json:"stop_reason"`
+	StopSequence *string   `json:"stop_sequence"`
+	Usage        Usage     `json:"usage"`
+	Error        Error     `json:"error"`
+}
+
+type Delta struct {
+	Type         string  `json:"type"`
+	Text         string  `json:"text"`
+	StopReason   *string `json:"stop_reason"`
+	StopSequence *string `json:"stop_sequence"`
+}
+
+type StreamResponse struct {
+	Type         string    `json:"type"`
+	Message      *Response `json:"message"`
+	Index        int       `json:"index"`
+	ContentBlock *Content  `json:"content_block"`
+	Delta        *Delta    `json:"delta"`
+	Usage        *Usage    `json:"usage"`
 }
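
StreamResponse is the envelope for Anthropic's messages-streaming events; which pointer fields are non-nil depends on the event type (message_start carries Message, content_block_delta carries Delta, message_delta carries Usage and the stop reason). A small standalone sketch decoding two illustrative event payloads, with the structs mirrored locally and trimmed:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed local mirrors of the streaming types in the diff above.
type Delta struct {
	Type       string  `json:"type"`
	Text       string  `json:"text"`
	StopReason *string `json:"stop_reason"`
}

type Usage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
}

type StreamResponse struct {
	Type  string `json:"type"`
	Delta *Delta `json:"delta"`
	Usage *Usage `json:"usage"`
}

func main() {
	// Illustrative "data:" payloads shaped like Anthropic's stream events.
	events := []string{
		`{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}}`,
		`{"type":"message_delta","delta":{"stop_reason":"end_turn"},"usage":{"output_tokens":12}}`,
	}
	for _, e := range events {
		var sr StreamResponse
		if err := json.Unmarshal([]byte(e), &sr); err != nil {
			panic(err)
		}
		fmt.Printf("event=%s delta=%+v usage=%+v\n", sr.Type, sr.Delta, sr.Usage)
	}
}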

View File

@@ -0,0 +1,7 @@
package baichuan
var ModelList = []string{
"Baichuan2-Turbo",
"Baichuan2-Turbo-192k",
"Baichuan-Text-Embedding",
}

View File

@@ -1,22 +1,118 @@
 package baidu

 import (
+	"errors"
+	"fmt"
+	"io"
 	"net/http"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/relay/channel"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
-	"github.com/gin-gonic/gin"
-	"one-api/relay/channel/openai"
 )

 type Adaptor struct {
 }

-func (a *Adaptor) Auth(c *gin.Context) error {
+func (a *Adaptor) Init(meta *util.RelayMeta) {
+}
+
+func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
+	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/clntwmv7t
+	suffix := "chat/"
+	if strings.HasPrefix(meta.ActualModelName, "Embedding") {
+		suffix = "embeddings/"
+	}
+	if strings.HasPrefix(meta.ActualModelName, "bge-large") {
+		suffix = "embeddings/"
+	}
+	if strings.HasPrefix(meta.ActualModelName, "tao-8k") {
+		suffix = "embeddings/"
+	}
+	switch meta.ActualModelName {
+	case "ERNIE-4.0":
+		suffix += "completions_pro"
+	case "ERNIE-Bot-4":
+		suffix += "completions_pro"
+	case "ERNIE-3.5-8K":
+		suffix += "completions"
+	case "ERNIE-Bot-8K":
+		suffix += "ernie_bot_8k"
+	case "ERNIE-Bot":
+		suffix += "completions"
+	case "ERNIE-Speed":
+		suffix += "ernie_speed"
+	case "ERNIE-Bot-turbo":
+		suffix += "eb-instant"
+	case "BLOOMZ-7B":
+		suffix += "bloomz_7b1"
+	case "Embedding-V1":
+		suffix += "embedding-v1"
+	case "bge-large-zh":
+		suffix += "bge_large_zh"
+	case "bge-large-en":
+		suffix += "bge_large_en"
+	case "tao-8k":
+		suffix += "tao_8k"
+	default:
+		suffix += meta.ActualModelName
+	}
+	fullRequestURL := fmt.Sprintf("%s/rpc/2.0/ai_custom/v1/wenxinworkshop/%s", meta.BaseURL, suffix)
+	var accessToken string
+	var err error
+	if accessToken, err = GetAccessToken(meta.APIKey); err != nil {
+		return "", err
+	}
+	fullRequestURL += "?access_token=" + accessToken
+	return fullRequestURL, nil
+}
+
+func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
+	channel.SetupCommonRequestHeader(c, req, meta)
+	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
 	return nil
 }

-func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
-	return nil, nil
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+	switch relayMode {
+	case constant.RelayModeEmbeddings:
+		baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
+		return baiduEmbeddingRequest, nil
+	default:
+		baiduRequest := ConvertRequest(*request)
+		return baiduRequest, nil
+	}
 }

-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
-	return nil, nil, nil
+func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
+	return channel.DoRequestHelper(a, c, meta, requestBody)
+}
+
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if meta.IsStream {
+		err, usage = StreamHandler(c, resp)
+	} else {
+		switch meta.Mode {
+		case constant.RelayModeEmbeddings:
+			err, usage = EmbeddingHandler(c, resp)
+		default:
+			err, usage = Handler(c, resp)
+		}
+	}
+	return
+}
+
+func (a *Adaptor) GetModelList() []string {
+	return ModelList
+}
+
+func (a *Adaptor) GetChannelName() string {
+	return "baidu"
 }
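
GetRequestURL above maps each model name to its Qianfan endpoint suffix and appends the cached access token as a query parameter. A condensed, runnable sketch of the resulting URL shape for two of the cases ("token" stands in for GetAccessToken's result):

package main

import "fmt"

// Condensed version of the suffix mapping above, two cases only.
func requestURL(baseURL, model, token string) string {
	suffix := "chat/"
	switch model {
	case "Embedding-V1":
		suffix = "embeddings/embedding-v1"
	case "ERNIE-Bot":
		suffix += "completions"
	default:
		suffix += model
	}
	return fmt.Sprintf("%s/rpc/2.0/ai_custom/v1/wenxinworkshop/%s?access_token=%s", baseURL, suffix, token)
}

func main() {
	fmt.Println(requestURL("https://aip.baidubce.com", "ERNIE-Bot", "token"))
	// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=token
	fmt.Println(requestURL("https://aip.baidubce.com", "Embedding-V1", "token"))
	// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1?access_token=token
}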

View File

@@ -0,0 +1,13 @@
package baidu
var ModelList = []string{
"ERNIE-Bot-4",
"ERNIE-Bot-8K",
"ERNIE-Bot",
"ERNIE-Speed",
"ERNIE-Bot-turbo",
"Embedding-V1",
"bge-large-zh",
"bge-large-en",
"tao-8k",
}

View File

@@ -6,13 +6,14 @@ import (
 	"errors"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
-	"one-api/relay/util"
 	"strings"
 	"sync"
 	"time"
@@ -31,9 +32,16 @@ type Message struct {
 }

 type ChatRequest struct {
-	Messages []Message `json:"messages"`
-	Stream   bool      `json:"stream"`
-	UserId   string    `json:"user_id,omitempty"`
+	Messages        []Message `json:"messages"`
+	Temperature     float64   `json:"temperature,omitempty"`
+	TopP            float64   `json:"top_p,omitempty"`
+	PenaltyScore    float64   `json:"penalty_score,omitempty"`
+	Stream          bool      `json:"stream,omitempty"`
+	System          string    `json:"system,omitempty"`
+	DisableSearch   bool      `json:"disable_search,omitempty"`
+	EnableCitation  bool      `json:"enable_citation,omitempty"`
+	MaxOutputTokens int       `json:"max_output_tokens,omitempty"`
+	UserId          string    `json:"user_id,omitempty"`
 }

 type Error struct {
@@ -43,35 +51,35 @@ type Error struct {

 var baiduTokenStore sync.Map

-func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
-	messages := make([]Message, 0, len(request.Messages))
+func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
+	baiduRequest := ChatRequest{
+		Messages:        make([]Message, 0, len(request.Messages)),
+		Temperature:     request.Temperature,
+		TopP:            request.TopP,
+		PenaltyScore:    request.FrequencyPenalty,
+		Stream:          request.Stream,
+		DisableSearch:   false,
+		EnableCitation:  false,
+		MaxOutputTokens: request.MaxTokens,
+		UserId:          request.User,
+	}
 	for _, message := range request.Messages {
 		if message.Role == "system" {
-			messages = append(messages, Message{
-				Role:    "user",
-				Content: message.StringContent(),
-			})
-			messages = append(messages, Message{
-				Role:    "assistant",
-				Content: "Okay",
-			})
+			baiduRequest.System = message.StringContent()
 		} else {
-			messages = append(messages, Message{
+			baiduRequest.Messages = append(baiduRequest.Messages, Message{
 				Role:    message.Role,
 				Content: message.StringContent(),
 			})
 		}
 	}
-	return &ChatRequest{
-		Messages: messages,
-		Stream:   request.Stream,
-	}
+	return &baiduRequest
 }

 func responseBaidu2OpenAI(response *ChatResponse) *openai.TextResponse {
 	choice := openai.TextResponseChoice{
 		Index: 0,
-		Message: openai.Message{
+		Message: model.Message{
 			Role:    "assistant",
 			Content: response.Result,
 		},
@@ -103,7 +111,7 @@ func streamResponseBaidu2OpenAI(baiduResponse *ChatStreamResponse) *openai.ChatC
 	return &response
 }

-func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
 	return &EmbeddingRequest{
 		Input: request.ParseInput(),
 	}
@@ -126,8 +134,8 @@ func embeddingResponseBaidu2OpenAI(response *EmbeddingResponse) *openai.Embeddin
 	return &openAIEmbeddingResponse
 }

-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var usage openai.Usage
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	var usage model.Usage
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -189,7 +197,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 	return nil, &usage
 }

-func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var baiduResponse ChatResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -204,8 +212,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if baiduResponse.ErrorMsg != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: baiduResponse.ErrorMsg,
 				Type:    "baidu_error",
 				Param:   "",
@@ -226,7 +234,7 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 	return nil, &fullTextResponse.Usage
 }

-func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var baiduResponse EmbeddingResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -241,8 +249,8 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if baiduResponse.ErrorMsg != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: baiduResponse.ErrorMsg,
 				Type:    "baidu_error",
 				Param:   "",
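
The ConvertRequest change above sends the system prompt through ERNIE's dedicated system field instead of injecting a fake user/"Okay" assistant exchange, and it now forwards temperature, top_p, penalty_score and max_output_tokens. A standalone sketch of the resulting request body, mirroring only the relevant ChatRequest fields:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the new ChatRequest fields used in this sketch.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type ChatRequest struct {
	Messages []Message `json:"messages"`
	System   string    `json:"system,omitempty"`
}

func main() {
	req := ChatRequest{
		System:   "You are a helpful assistant.",
		Messages: []Message{{Role: "user", Content: "你好"}},
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
	// {"messages":[{"role":"user","content":"你好"}],"system":"You are a helpful assistant."}
}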

View File

@@ -1,18 +1,18 @@
 package baidu

 import (
-	"one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/model"
 	"time"
 )

 type ChatResponse struct {
 	Id               string `json:"id"`
 	Object           string `json:"object"`
 	Created          int64  `json:"created"`
 	Result           string `json:"result"`
 	IsTruncated      bool   `json:"is_truncated"`
 	NeedClearHistory bool   `json:"need_clear_history"`
-	Usage            openai.Usage `json:"usage"`
+	Usage            model.Usage `json:"usage"`
 	Error
 }
@@ -37,7 +37,7 @@ type EmbeddingResponse struct {
 	Object  string          `json:"object"`
 	Created int64           `json:"created"`
 	Data    []EmbeddingData `json:"data"`
-	Usage   openai.Usage    `json:"usage"`
+	Usage   model.Usage     `json:"usage"`
 	Error
 }

relay/channel/common.go Normal file
View File

@@ -0,0 +1,51 @@
package channel
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
func SetupCommonRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) {
req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
req.Header.Set("Accept", c.Request.Header.Get("Accept"))
if meta.IsStream && c.Request.Header.Get("Accept") == "" {
req.Header.Set("Accept", "text/event-stream")
}
}
func DoRequestHelper(a Adaptor, c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
fullRequestURL, err := a.GetRequestURL(meta)
if err != nil {
return nil, fmt.Errorf("get request url failed: %w", err)
}
req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return nil, fmt.Errorf("new request failed: %w", err)
}
err = a.SetupRequestHeader(c, req, meta)
if err != nil {
return nil, fmt.Errorf("setup request header failed: %w", err)
}
resp, err := DoRequest(c, req)
if err != nil {
return nil, fmt.Errorf("do request failed: %w", err)
}
return resp, nil
}
func DoRequest(c *gin.Context, req *http.Request) (*http.Response, error) {
resp, err := util.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if resp == nil {
return nil, errors.New("resp is nil")
}
_ = req.Body.Close()
_ = c.Request.Body.Close()
return resp, nil
}
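
DoRequestHelper centralizes the per-channel request flow (resolve URL, build the request, set headers, send), so each adaptor's DoRequest can be a one-liner. A self-contained sketch of the same pattern against a local test server, with a trimmed stand-in interface in place of the real Adaptor and RelayMeta types:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// Trimmed stand-in for the Adaptor surface that DoRequestHelper relies on.
type adaptor interface {
	GetRequestURL() (string, error)
	SetupRequestHeader(req *http.Request) error
}

// Mirrors the helper's flow: resolve URL, build request, set headers, send.
func doRequestHelper(a adaptor, method string, body io.Reader) (*http.Response, error) {
	url, err := a.GetRequestURL()
	if err != nil {
		return nil, fmt.Errorf("get request url failed: %w", err)
	}
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, fmt.Errorf("new request failed: %w", err)
	}
	if err := a.SetupRequestHeader(req); err != nil {
		return nil, fmt.Errorf("setup request header failed: %w", err)
	}
	return http.DefaultClient.Do(req)
}

type demoAdaptor struct{ base string }

func (d demoAdaptor) GetRequestURL() (string, error) { return d.base + "/v1/chat", nil }

func (d demoAdaptor) SetupRequestHeader(req *http.Request) error {
	req.Header.Set("Authorization", "Bearer sk-demo")
	return nil
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "auth=%s", r.Header.Get("Authorization"))
	}))
	defer srv.Close()

	resp, err := doRequestHelper(demoAdaptor{base: srv.URL}, http.MethodPost, strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(string(b)) // auth=Bearer sk-demo
}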

View File

@@ -0,0 +1,66 @@
package gemini
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/helper"
channelhelper "github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Init(meta *util.RelayMeta) {
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
version := helper.AssignOrDefault(meta.APIVersion, "v1")
action := "generateContent"
if meta.IsStream {
action = "streamGenerateContent"
}
return fmt.Sprintf("%s/%s/models/%s:%s", meta.BaseURL, version, meta.ActualModelName, action), nil
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channelhelper.SetupCommonRequestHeader(c, req, meta)
req.Header.Set("x-goog-api-key", meta.APIKey)
return nil
}
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return ConvertRequest(*request), nil
}
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channelhelper.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
var responseText string
err, responseText = StreamHandler(c, resp)
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
} else {
err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
}
return
}
func (a *Adaptor) GetModelList() []string {
return ModelList
}
func (a *Adaptor) GetChannelName() string {
return "google gemini"
}
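
The Gemini adaptor builds its URL from an API version (defaulting to "v1") plus a generateContent or streamGenerateContent action. A runnable sketch of that rule, with assignOrDefault standing in for helper.AssignOrDefault:

package main

import "fmt"

// Stand-in for helper.AssignOrDefault: use value unless it is empty.
func assignOrDefault(value, defaultValue string) string {
	if value != "" {
		return value
	}
	return defaultValue
}

func geminiURL(baseURL, apiVersion, model string, stream bool) string {
	version := assignOrDefault(apiVersion, "v1")
	action := "generateContent"
	if stream {
		action = "streamGenerateContent"
	}
	return fmt.Sprintf("%s/%s/models/%s:%s", baseURL, version, model, action)
}

func main() {
	fmt.Println(geminiURL("https://generativelanguage.googleapis.com", "", "gemini-pro", true))
	// https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent
}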

View File

@@ -0,0 +1,6 @@
package gemini
var ModelList = []string{
"gemini-pro", "gemini-1.0-pro-001",
"gemini-pro-vision", "gemini-1.0-pro-vision-001",
}

View File

@@ -1,18 +1,19 @@
-package google
+package gemini

 import (
 	"bufio"
 	"encoding/json"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/image"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
 	"io"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/image"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
 	"strings"

 	"github.com/gin-gonic/gin"
@@ -21,14 +22,14 @@ import (
 // https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn

 const (
-	GeminiVisionMaxImageNum = 16
+	VisionMaxImageNum = 16
 )

 // Setting safety to the lowest possible values since Gemini is already powerless enough
-func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRequest {
-	geminiRequest := GeminiChatRequest{
-		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
-		SafetySettings: []GeminiChatSafetySettings{
+func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
+	geminiRequest := ChatRequest{
+		Contents: make([]ChatContent, 0, len(textRequest.Messages)),
+		SafetySettings: []ChatSafetySettings{
 			{
 				Category:  "HARM_CATEGORY_HARASSMENT",
 				Threshold: config.GeminiSafetySetting,
@@ -46,14 +47,14 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
 				Threshold: config.GeminiSafetySetting,
 			},
 		},
-		GenerationConfig: GeminiChatGenerationConfig{
+		GenerationConfig: ChatGenerationConfig{
 			Temperature:     textRequest.Temperature,
 			TopP:            textRequest.TopP,
 			MaxOutputTokens: textRequest.MaxTokens,
 		},
 	}
 	if textRequest.Functions != nil {
-		geminiRequest.Tools = []GeminiChatTools{
+		geminiRequest.Tools = []ChatTools{
 			{
 				FunctionDeclarations: textRequest.Functions,
 			},
@@ -61,30 +62,30 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
 	}
 	shouldAddDummyModelMessage := false
 	for _, message := range textRequest.Messages {
-		content := GeminiChatContent{
+		content := ChatContent{
 			Role: message.Role,
-			Parts: []GeminiPart{
+			Parts: []Part{
 				{
 					Text: message.StringContent(),
 				},
 			},
 		}
 		openaiContent := message.ParseContent()
-		var parts []GeminiPart
+		var parts []Part
 		imageNum := 0
 		for _, part := range openaiContent {
-			if part.Type == openai.ContentTypeText {
-				parts = append(parts, GeminiPart{
+			if part.Type == model.ContentTypeText {
+				parts = append(parts, Part{
 					Text: part.Text,
 				})
-			} else if part.Type == openai.ContentTypeImageURL {
+			} else if part.Type == model.ContentTypeImageURL {
 				imageNum += 1
-				if imageNum > GeminiVisionMaxImageNum {
+				if imageNum > VisionMaxImageNum {
 					continue
 				}
 				mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
-				parts = append(parts, GeminiPart{
-					InlineData: &GeminiInlineData{
+				parts = append(parts, Part{
+					InlineData: &InlineData{
 						MimeType: mimeType,
 						Data:     data,
 					},
@@ -106,9 +107,9 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
 	// If a system message is the last message, we need to add a dummy model message to make gemini happy
 	if shouldAddDummyModelMessage {
-		geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
+		geminiRequest.Contents = append(geminiRequest.Contents, ChatContent{
 			Role: "model",
-			Parts: []GeminiPart{
+			Parts: []Part{
 				{
 					Text: "Okay",
 				},
@@ -121,12 +122,12 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
 	return &geminiRequest
 }

-type GeminiChatResponse struct {
-	Candidates     []GeminiChatCandidate    `json:"candidates"`
-	PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
+type ChatResponse struct {
+	Candidates     []ChatCandidate    `json:"candidates"`
+	PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
 }

-func (g *GeminiChatResponse) GetResponseText() string {
+func (g *ChatResponse) GetResponseText() string {
 	if g == nil {
 		return ""
 	}
@@ -136,23 +137,23 @@ func (g *GeminiChatResponse) GetResponseText() string {
 	return ""
 }

-type GeminiChatCandidate struct {
-	Content       GeminiChatContent        `json:"content"`
-	FinishReason  string                   `json:"finishReason"`
-	Index         int64                    `json:"index"`
-	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
+type ChatCandidate struct {
+	Content       ChatContent        `json:"content"`
+	FinishReason  string             `json:"finishReason"`
+	Index         int64              `json:"index"`
+	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
 }

-type GeminiChatSafetyRating struct {
+type ChatSafetyRating struct {
 	Category    string `json:"category"`
 	Probability string `json:"probability"`
 }

-type GeminiChatPromptFeedback struct {
-	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
+type ChatPromptFeedback struct {
+	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
 }

-func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextResponse {
+func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
 	fullTextResponse := openai.TextResponse{
 		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion",
@@ -162,7 +163,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
 	for i, candidate := range response.Candidates {
 		choice := openai.TextResponseChoice{
 			Index: i,
-			Message: openai.Message{
+			Message: model.Message{
 				Role:    "assistant",
 				Content: "",
 			},
@@ -176,7 +177,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
 	return &fullTextResponse
 }

-func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai.ChatCompletionsStreamResponse {
+func streamResponseGeminiChat2OpenAI(geminiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = geminiResponse.GetResponseText()
 	choice.FinishReason = &constant.StopFinishReason
@@ -187,7 +188,7 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai
 	return &response
 }

-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
 	responseText := ""
 	dataChan := make(chan string)
 	stopChan := make(chan bool)
@@ -257,7 +258,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 	return nil, responseText
 }

-func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -266,14 +267,14 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
-	var geminiResponse GeminiChatResponse
+	var geminiResponse ChatResponse
 	err = json.Unmarshal(responseBody, &geminiResponse)
 	if err != nil {
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	if len(geminiResponse.Candidates) == 0 {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
 				Message: "No candidates returned",
 				Type:    "server_error",
 				Param:   "",
@@ -283,9 +284,9 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
 		}, nil
 	}
 	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
-	fullTextResponse.Model = model
-	completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), model)
-	usage := openai.Usage{
+	fullTextResponse.Model = modelName
+	completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), modelName)
+	usage := model.Usage{
 		PromptTokens:     promptTokens,
 		CompletionTokens: completionTokens,
 		TotalTokens:      promptTokens + completionTokens,

View File

@@ -0,0 +1,41 @@
package gemini
type ChatRequest struct {
Contents []ChatContent `json:"contents"`
SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
Tools []ChatTools `json:"tools,omitempty"`
}
type InlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type Part struct {
Text string `json:"text,omitempty"`
InlineData *InlineData `json:"inlineData,omitempty"`
}
type ChatContent struct {
Role string `json:"role,omitempty"`
Parts []Part `json:"parts"`
}
type ChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type ChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type ChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}

View File

@@ -1,22 +0,0 @@
package google
import (
"github.com/gin-gonic/gin"
"net/http"
"one-api/relay/channel/openai"
)
type Adaptor struct {
}
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}

View File

@@ -1,80 +0,0 @@
package google
import (
"one-api/relay/channel/openai"
)
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
Tools []GeminiChatTools `json:"tools,omitempty"`
}
type GeminiInlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type GeminiPart struct {
Text string `json:"text,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}
type GeminiChatContent struct {
Role string `json:"role,omitempty"`
Parts []GeminiPart `json:"parts"`
}
type GeminiChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type GeminiChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type GeminiChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
type PaLMChatMessage struct {
Author string `json:"author"`
Content string `json:"content"`
}
type PaLMFilter struct {
Reason string `json:"reason"`
Message string `json:"message"`
}
type PaLMPrompt struct {
Messages []PaLMChatMessage `json:"messages"`
}
type PaLMChatRequest struct {
Prompt PaLMPrompt `json:"prompt"`
Temperature float64 `json:"temperature,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK int `json:"topK,omitempty"`
}
type PaLMError struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
type PaLMChatResponse struct {
Candidates []PaLMChatMessage `json:"candidates"`
Messages []openai.Message `json:"messages"`
Filters []PaLMFilter `json:"filters"`
Error PaLMError `json:"error"`
}

View File

@@ -0,0 +1,10 @@
package groq
// https://console.groq.com/docs/models
var ModelList = []string{
"gemma-7b-it",
"llama2-7b-2048",
"llama2-70b-4096",
"mixtral-8x7b-32768",
}

View File

@@ -2,14 +2,19 @@ package channel
 import (
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
-	"one-api/relay/channel/openai"
 )

 type Adaptor interface {
-	GetRequestURL() string
-	Auth(c *gin.Context) error
-	ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error)
-	DoRequest(request *openai.GeneralOpenAIRequest) error
-	DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error)
+	Init(meta *util.RelayMeta)
+	GetRequestURL(meta *util.RelayMeta) (string, error)
+	SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
+	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
+	DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
+	DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode)
+	GetModelList() []string
+	GetChannelName() string
 }
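
The widened interface splits the old Auth/DoResponse pair into explicit stages the relay controller can drive uniformly across channels. A toy, self-contained walk-through of the Init, ConvertRequest, DoRequest order under stand-in types (the real methods also thread *gin.Context and util.RelayMeta):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// Stand-ins for util.RelayMeta and model.GeneralOpenAIRequest, trimmed
// to what this flow sketch needs.
type relayMeta struct{ BaseURL string }

type openaiRequest struct {
	Model string `json:"model"`
}

type adaptor interface {
	Init(meta *relayMeta)
	ConvertRequest(relayMode int, request *openaiRequest) (any, error)
	DoRequest(meta *relayMeta, requestBody io.Reader) (*http.Response, error)
}

type demo struct{}

func (demo) Init(meta *relayMeta) {}

// Schema translation happens here; real adaptors emit channel-specific structs.
func (demo) ConvertRequest(relayMode int, request *openaiRequest) (any, error) {
	return map[string]string{"model": request.Model}, nil
}

func (demo) DoRequest(meta *relayMeta, requestBody io.Reader) (*http.Response, error) {
	return http.Post(meta.BaseURL, "application/json", requestBody)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(w, r.Body) // echo the upstream body back
	}))
	defer srv.Close()

	var a adaptor = demo{}
	meta := &relayMeta{BaseURL: srv.URL}
	a.Init(meta)
	converted, err := a.ConvertRequest(0, &openaiRequest{Model: "gpt-3.5-turbo"})
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(converted)
	resp, err := a.DoRequest(meta, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	echoed, _ := io.ReadAll(resp.Body)
	fmt.Println(string(echoed)) // {"model":"gpt-3.5-turbo"}
}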

View File

@@ -0,0 +1,9 @@
package lingyiwanwu
// https://platform.lingyiwanwu.com/docs
var ModelList = []string{
"yi-34b-chat-0205",
"yi-34b-chat-200k",
"yi-vl-plus",
}

View File

@@ -0,0 +1,7 @@
package minimax
var ModelList = []string{
"abab5.5s-chat",
"abab5.5-chat",
"abab6-chat",
}

View File

@@ -0,0 +1,14 @@
package minimax
import (
"fmt"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/util"
)
func GetRequestURL(meta *util.RelayMeta) (string, error) {
if meta.Mode == constant.RelayModeChatCompletions {
return fmt.Sprintf("%s/v1/text/chatcompletion_v2", meta.BaseURL), nil
}
return "", fmt.Errorf("unsupported relay mode %d for minimax", meta.Mode)
}
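
Minimax only supports chat completions here, pinned to the chatcompletion_v2 path; every other relay mode is rejected. A tiny sketch of the rule (relayModeChatCompletions is a stand-in for constant.RelayModeChatCompletions):

package main

import (
	"errors"
	"fmt"
)

const relayModeChatCompletions = 1 // stand-in value for the real constant

func minimaxURL(baseURL string, mode int) (string, error) {
	if mode == relayModeChatCompletions {
		return fmt.Sprintf("%s/v1/text/chatcompletion_v2", baseURL), nil
	}
	return "", errors.New("unsupported relay mode for minimax")
}

func main() {
	url, _ := minimaxURL("https://api.minimax.chat", relayModeChatCompletions)
	fmt.Println(url) // https://api.minimax.chat/v1/text/chatcompletion_v2
}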

View File

@@ -0,0 +1,10 @@
package mistral
var ModelList = []string{
"open-mistral-7b",
"open-mixtral-8x7b",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-large-latest",
"mistral-embed",
}

Some files were not shown because too many files have changed in this diff.