Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-30 13:23:42 +08:00)

Compare commits: v0.4.7 ... v0.4.9-alp (43 commits)
| SHA1 |
|---|
| 3bab5b48bf |
| f3bccee3b5 |
| d84b0b0f5d |
| d383302e8a |
| 04f40def2f |
| c48b7bc0f5 |
| b09daf5ec1 |
| c90c0ecef4 |
| 1ab5fb7d2d |
| f769711c19 |
| edc5156693 |
| 9ec6506c32 |
| f387cc5ead |
| 569b68c43b |
| f0c40a6cd0 |
| 0cea9e6a6f |
| b1b3651e84 |
| 8f6bd51f58 |
| bddbf57104 |
| 9a16b0f9e5 |
| 3530309a31 |
| 733ebc067b |
| 6a8567ac14 |
| aabc546691 |
| 1c82b06f35 |
| 9e4109672a |
| 64c35334e6 |
| 0ce572b405 |
| a326ac4b28 |
| 05b0e77839 |
| 51f19470bc |
| 737672fb0b |
| 0941e294bf |
| 431d505f79 |
| f0dc7f3f06 |
| 99fed1f850 |
| 4dc5388a80 |
| f81f4c60b2 |
| c613d8b6b2 |
| 7adac1c09c |
| 6f05128368 |
| 9b178a28a3 |
| 4a6a7f4635 |

**.github/ISSUE_TEMPLATE/bug_report.md** (4 changed lines)
```diff
@@ -8,11 +8,13 @@ assignees: ''
 ---
 
 **例行检查**
 
+[//]: # (方框内删除已有的空格,填 x 号)
 + [ ] 我已确认目前没有类似 issue
 + [ ] 我已确认我已升级到最新版本
 + [ ] 我已完整查看过项目 README,尤其是常见问题部分
 + [ ] 我理解并愿意跟进此 issue,协助测试和提供反馈
-+ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,不遵循规则的 issue 可能会被无视或直接关闭
++ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,**不遵循规则的 issue 可能会被无视或直接关闭**
 
 **问题描述**
 
```

**.github/ISSUE_TEMPLATE/config.yml** (3 changed lines)
```diff
@@ -6,6 +6,3 @@ contact_links:
   - name: 赞赏支持
     url: https://iamazing.cn/page/reward
     about: 请作者喝杯咖啡,以激励作者持续开发
-  - name: 付费部署或定制功能
-    url: https://openai.justsong.cn/
-    about: 加群后联系群主
```

**.github/ISSUE_TEMPLATE/feature_request.md** (5 changed lines)
```diff
@@ -8,10 +8,13 @@ assignees: ''
 ---
 
 **例行检查**
 
+[//]: # (方框内删除已有的空格,填 x 号)
 + [ ] 我已确认目前没有类似 issue
 + [ ] 我已确认我已升级到最新版本
++ [ ] 我已完整查看过项目 README,已确定现有版本无法满足需求
 + [ ] 我理解并愿意跟进此 issue,协助测试和提供反馈
-+ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,不遵循规则的 issue 可能会被无视或直接关闭
++ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,**不遵循规则的 issue 可能会被无视或直接关闭**
 
 **功能描述**
 
```

**README.en.md** (26 changed lines)
```diff
@@ -10,7 +10,7 @@
 
 # One API
 
-_✨ The all-in-one OpenAI interface, integrates various API access methods, ready to use ✨_
+_✨ An OpenAI key management & redistribution system, easy to deploy & use ✨_
 
 </div>
 
@@ -57,17 +57,14 @@ _✨ The all-in-one OpenAI interface, integrates various API access methods, rea
 > **Note**: The latest image pulled from Docker may be an `alpha` release. Specify the version manually if you require stability.
 
 ## Features
-1. Supports multiple API access channels. Welcome PRs or issue submissions for additional channels:
+1. Supports multiple API access channels:
     + [x] Official OpenAI channel (support proxy configuration)
    + [x] **Azure OpenAI API**
+    + [x] [API Distribute](https://api.gptjk.top/register?aff=QGxj)
    + [x] [OpenAI-SB](https://openai-sb.com)
    + [x] [API2D](https://api2d.com/r/197971)
    + [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf)
    + [x] [AI Proxy](https://aiproxy.io/?i=OneAPI) (invitation code: `OneAPI`)
-    + [x] [API2GPT](http://console.api2gpt.com/m/00002S)
-    + [x] [CloseAI](https://console.closeai-asia.com/r/2412)
-    + [x] [AI.LS](https://ai.ls)
-    + [x] [OpenAI Max](https://openaimax.com)
    + [x] Custom channel: Various third-party proxy services not included in the list
 2. Supports access to multiple channels through **load balancing**.
 3. Supports **stream mode** that enables typewriter-like effect through stream transmission.
@@ -174,6 +171,15 @@ Refer to [#175](https://github.com/songquanpeng/one-api/issues/175) for detailed
 If you encounter a blank page after deployment, refer to [#97](https://github.com/songquanpeng/one-api/issues/97) for possible solutions.
 
 ### Deployment on Third-Party Platforms
+<details>
+<summary><strong>Deploy on Sealos</strong></summary>
+<div>
+
+Please refer to [this tutorial](https://github.com/c121914yu/FastGPT/blob/main/docs/deploy/one-api/sealos.md).
+
+</div>
+</details>
+
 <details>
 <summary><strong>Deployment on Zeabur</strong></summary>
 <div>
@@ -240,7 +246,7 @@ If the channel ID is not provided, load balancing will be used to distribute the
     + Example: `CHANNEL_UPDATE_FREQUENCY=1440`
 8. `CHANNEL_TEST_FREQUENCY`: When set, it periodically tests the channels, with the unit in minutes. If not set, no test will happen.
     + Example: `CHANNEL_TEST_FREQUENCY=1440`
-9. `REQUEST_INTERVAL`: The time interval (in seconds) between requests when updating channel balances and testing channel availability. Default is no interval.
+9. `POLLING_INTERVAL`: The time interval (in seconds) between requests when updating channel balances and testing channel availability. Default is no interval.
     + Example: `POLLING_INTERVAL=5`
 
 ### Command Line Parameters
@@ -279,6 +285,10 @@ If the channel ID is not provided, load balancing will be used to distribute the
 ## Note
 This project is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**. It must not be used for illegal purposes.
 
-This project is open-sourced under the MIT license. One must somehow retain the copyright information of One API.
+This project is released under the MIT license. Based on this, attribution and a link to this project must be included at the bottom of the page.
 
+The same applies to derivative projects based on this project.
+
+If you do not wish to include attribution, prior authorization must be obtained.
+
 According to the MIT license, users should bear the risk and responsibility of using this project, and the developer of this open-source project is not responsible for this.
```

**README.md** (59 changed lines)
````diff
@@ -51,27 +51,27 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
   <a href="https://iamazing.cn/page/reward">赞赏支持</a>
 </p>
 
+> **Note**:本项目为开源项目,请在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。
+
 > **Note**:使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。
 
 > **Warning**:从 `v0.3` 版本升级到 `v0.4` 版本需要手动迁移数据库,请手动执行[数据库迁移脚本](./bin/migration_v0.3-v0.4.sql)。
 
 ## 功能
-1. 支持多种 API 访问渠道,欢迎 PR 或提 issue 添加更多渠道:
-   + [x] OpenAI 官方通道(支持配置代理)
+1. 支持多种 API 访问渠道:
+   + [x] OpenAI 官方通道(支持配置镜像)
    + [x] **Azure OpenAI API**
+   + [x] [API Distribute](https://api.gptjk.top/register?aff=QGxj)
    + [x] [OpenAI-SB](https://openai-sb.com)
    + [x] [API2D](https://api2d.com/r/197971)
    + [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf)
    + [x] [AI Proxy](https://aiproxy.io/?i=OneAPI) (邀请码:`OneAPI`)
-   + [x] [API2GPT](http://console.api2gpt.com/m/00002S)
    + [x] [CloseAI](https://console.closeai-asia.com/r/2412)
-   + [x] [AI.LS](https://ai.ls)
-   + [x] [OpenAI Max](https://openaimax.com)
    + [x] 自定义渠道:例如各种未收录的第三方代理服务
 2. 支持通过**负载均衡**的方式访问多个渠道。
 3. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
 4. 支持**多机部署**,[详见此处](#多机部署)。
-5. 支持**令牌管理**,设置令牌的过期时间和使用次数。
+5. 支持**令牌管理**,设置令牌的过期时间和额度。
 6. 支持**兑换码管理**,支持批量生成和导出兑换码,可使用兑换码为账户进行充值。
 7. 支持**通道管理**,批量创建通道。
 8. 支持**用户分组**以及**渠道分组**,支持为不同分组设置不同的倍率。
@@ -80,21 +80,26 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
 11. 支持**用户邀请奖励**。
 12. 支持以美元为单位显示额度。
 13. 支持发布公告,设置充值链接,设置新用户初始额度。
-14. 支持丰富的**自定义**设置,
+14. 支持模型映射,重定向用户的请求模型。
+15. 支持丰富的**自定义**设置,
     1. 支持自定义系统名称,logo 以及页脚。
     2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
-15. 支持通过系统访问令牌访问管理 API。
-16. 支持 Cloudflare Turnstile 用户校验。
-17. 支持用户管理,支持**多种用户登录注册方式**:
+16. 支持通过系统访问令牌访问管理 API。
+17. 支持 Cloudflare Turnstile 用户校验。
+18. 支持用户管理,支持**多种用户登录注册方式**:
    + 邮箱登录注册以及通过邮箱进行密码重置。
    + [GitHub 开放授权](https://github.com/settings/applications/new)。
    + 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
-18. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
+19. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
 
 ## 部署
 ### 基于 Docker 进行部署
 部署命令:`docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api`
 
+如果上面的镜像无法拉取,可以尝试使用 GitHub 的 Docker 镜像,将上面的 `justsong/one-api` 替换为 `ghcr.io/songquanpeng/one-api` 即可。
+
+如果你的并发量较大,推荐设置 `SQL_DSN`,详见下面[环境变量](#环境变量)一节。
+
 更新命令:`docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR`
 
 `-p 3000:3000` 中的第一个 `3000` 是宿主机的端口,可以根据需要进行修改。
@@ -114,6 +119,7 @@ server{
           proxy_set_header X-Forwarded-For $remote_addr;
           proxy_cache_bypass $http_upgrade;
           proxy_set_header Accept-Encoding gzip;
+          proxy_read_timeout 300s;  # GPT-4 需要较长的超时时间,请自行调整
    }
 }
 ```
@@ -159,8 +165,8 @@ sudo service nginx restart
 ### 多机部署
 1. 所有服务器 `SESSION_SECRET` 设置一样的值。
 2. 必须设置 `SQL_DSN`,使用 MySQL 数据库而非 SQLite,所有服务器连接同一个数据库。
-3. 所有从服务器必须设置 `NODE_TYPE` 为 `slave`。
-4. 设置 `SYNC_FREQUENCY` 后服务器将定期从数据库同步配置。
+3. 所有从服务器必须设置 `NODE_TYPE` 为 `slave`,不设置则默认为主服务器。
+4. 设置 `SYNC_FREQUENCY` 后服务器将定期从数据库同步配置,在使用远程数据库的情况下,推荐设置该项并启用 Redis,无论主从。
 5. 从服务器可以选择设置 `FRONTEND_BASE_URL`,以重定向页面请求到主服务器。
 6. 从服务器上**分别**装好 Redis,设置好 `REDIS_CONN_STRING`,这样可以做到在缓存未过期的情况下数据库零访问,可以减少延迟。
 7. 如果主服务器访问数据库延迟也比较高,则也需要启用 Redis,并设置 `SYNC_FREQUENCY`,以定期从数据库同步配置。
@@ -195,6 +201,17 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
 注意修改端口号、`OPENAI_API_BASE_URL` 和 `OPENAI_API_KEY`。
 
 ### 部署到第三方平台
+<details>
+<summary><strong>部署到 Sealos </strong></summary>
+<div>
+
+> Sealos 可视化部署,仅需 1 分钟。
+
+参考这个[教程](https://github.com/c121914yu/FastGPT/blob/main/docs/deploy/one-api/sealos.md)中 1~5 步。
+
+</div>
+</details>
+
 <details>
 <summary><strong>部署到 Zeabur</strong></summary>
 <div>
@@ -221,6 +238,8 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
 
 等到系统启动后,使用 `root` 用户登录系统并做进一步的配置。
 
+**Note**:如果你不知道某个配置项的含义,可以临时删掉值以看到进一步的提示文字。
+
 ## 使用方法
 在`渠道`页面中添加你的 API Key,之后在`令牌`页面中新增访问令牌。
 
@@ -251,7 +270,10 @@ graph LR
    + 例子:`SESSION_SECRET=random_string`
 3. `SQL_DSN`:设置之后将使用指定数据库而非 SQLite,请使用 MySQL 8.0 版本。
    + 例子:`SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
-4. `FRONTEND_BASE_URL`:设置之后将使用指定的前端地址,而非后端地址。
+   + 注意需要提前建立数据库 `oneapi`,无需手动建表,程序将自动建表。
+   + 如果使用本地数据库:部署命令可添加 `--network="host"` 以使得容器内的程序可以访问到宿主机上的 MySQL。
+   + 如果使用云数据库:如果云服务器需要验证身份,需要在连接参数中添加 `?tls=skip-verify`。
+4. `FRONTEND_BASE_URL`:设置之后将重定向页面请求到指定的地址,仅限从服务器设置。
    + 例子:`FRONTEND_BASE_URL=https://openai.justsong.cn`
 5. `SYNC_FREQUENCY`:设置之后将定期与数据库同步配置,单位为秒,未设置则不进行同步。
    + 例子:`SYNC_FREQUENCY=60`
````
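For the cloud-database case added above, the extra parameter goes directly into the DSN, e.g. `SQL_DSN=oneapi:your-password@tcp(db.example.com:3306)/oneapi?tls=skip-verify` (hypothetical host and credentials); the `--network="host"` flag, by contrast, only applies when MySQL runs on the same machine as the Docker container.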
```diff
@@ -261,7 +283,7 @@ graph LR
    + 例子:`CHANNEL_UPDATE_FREQUENCY=1440`
 8. `CHANNEL_TEST_FREQUENCY`:设置之后将定期检查渠道,单位为分钟,未设置则不进行检查。
    + 例子:`CHANNEL_TEST_FREQUENCY=1440`
-9. `REQUEST_INTERVAL`:批量更新渠道余额以及测试可用性时的请求间隔,单位为秒,默认无间隔。
+9. `POLLING_INTERVAL`:批量更新渠道余额以及测试可用性时的请求间隔,单位为秒,默认无间隔。
    + 例子:`POLLING_INTERVAL=5`
 
 ### 命令行参数
@@ -298,13 +320,16 @@ https://openai.justsong.cn
 5. ChatGPT Next Web 报错:`Failed to fetch`
    + 部署的时候不要设置 `BASE_URL`。
    + 检查你的接口地址和 API Key 有没有填对。
+6. 报错:`当前分组负载已饱和,请稍后再试`
+   + 上游通道 429 了。
 
 ## 相关项目
 [FastGPT](https://github.com/c121914yu/FastGPT): 三分钟搭建 AI 知识库
 
 ## 注意
-本项目为开源项目,请在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。
-
-本项目使用 MIT 协议进行开源,请以某种方式保留 One API 的版权信息。
+本项目使用 MIT 协议进行开源,**在此基础上**,必须在页面底部保留署名以及指向本项目的链接。如果不想保留署名,必须首先获得授权。
+
+同样适用于基于本项目的二开项目。
+
 依据 MIT 协议,使用者需自行承担使用本项目的风险与责任,本开源项目开发者与此无关。
```
**time_test.sh**

```diff
@@ -1,25 +1,29 @@
 #!/bin/bash
 
-if [ $# -ne 3 ]; then
-  echo "Usage: time_test.sh <domain> <key> <count>"
+if [ $# -lt 3 ]; then
+  echo "Usage: time_test.sh <domain> <key> <count> [<model>]"
   exit 1
 fi
 
 domain=$1
 key=$2
 count=$3
+model=${4:-"gpt-3.5-turbo"} # 设置默认模型为 gpt-3.5-turbo
 
 total_time=0
 times=()
 
 for ((i=1; i<=count; i++)); do
-  result=$(curl -o /dev/null -s -w %{time_total}\\n \
+  result=$(curl -o /dev/null -s -w "%{http_code} %{time_total}\\n" \
           https://"$domain"/v1/chat/completions \
           -H "Content-Type: application/json" \
           -H "Authorization: Bearer $key" \
-           -d '{"messages": [{"content": "echo hi", "role": "user"}], "model": "gpt-3.5-turbo", "stream": false, "max_tokens": 1}')
+           -d '{"messages": [{"content": "echo hi", "role": "user"}], "model": "'"$model"'", "stream": false, "max_tokens": 1}')
-  echo "$result"
-  total_time=$(bc <<< "$total_time + $result")
-  times+=("$result")
+  http_code=$(echo "$result" | awk '{print $1}')
+  time=$(echo "$result" | awk '{print $2}')
+  echo "HTTP status code: $http_code, Time taken: $time"
+  total_time=$(bc <<< "$total_time + $time")
+  times+=("$time")
 done
 
 average_time=$(echo "scale=4; $total_time / $count" | bc)
```
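With the new optional fourth argument, an invocation might look like `./time_test.sh api.example.com sk-xxxx 10 gpt-4` (hypothetical domain and key): ten chat-completion requests are timed against the given model, and the model falls back to `gpt-3.5-turbo` when the argument is omitted. Each line of output now also carries the HTTP status code, so failed requests are no longer silently averaged in as if they were fast successes.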
```diff
@@ -72,7 +72,7 @@ var RootUserEmail = ""
 
 var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
 
-var requestInterval, _ = strconv.Atoi(os.Getenv("REQUEST_INTERVAL"))
+var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
 var RequestInterval = time.Duration(requestInterval) * time.Second
 
 const (
@@ -148,20 +148,22 @@ const (
     ChannelTypeAIProxy   = 10
     ChannelTypePaLM      = 11
     ChannelTypeAPI2GPT   = 12
+    ChannelTypeAIGC2D    = 13
 )
 
 var ChannelBaseURLs = []string{
-    "",                             // 0
-    "https://api.openai.com",       // 1
-    "https://oa.api2d.net",         // 2
-    "",                             // 3
-    "https://api.openai-proxy.org", // 4
-    "https://api.openai-sb.com",    // 5
-    "https://api.openaimax.com",    // 6
-    "https://api.ohmygpt.com",      // 7
-    "",                             // 8
-    "https://api.caipacity.com",    // 9
-    "https://api.aiproxy.io",       // 10
-    "",                             // 11
-    "https://api.api2gpt.com",      // 12
+    "",                              // 0
+    "https://api.openai.com",        // 1
+    "https://oa.api2d.net",          // 2
+    "",                              // 3
+    "https://api.closeai-proxy.xyz", // 4
+    "https://api.openai-sb.com",     // 5
+    "https://api.openaimax.com",     // 6
+    "https://api.ohmygpt.com",       // 7
+    "",                              // 8
+    "https://api.caipacity.com",     // 9
+    "https://api.aiproxy.io",        // 10
+    "",                              // 11
+    "https://api.api2gpt.com",       // 12
+    "https://api.aigc2d.com",        // 13
 }
```
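The channel-type constants double as indices into `ChannelBaseURLs`, which is why `ChannelTypeAIGC2D = 13` is paired with a new 14th URL entry (and why index 4 could simply be repointed at `https://api.closeai-proxy.xyz`). A minimal sketch of that lookup convention, with a hypothetical helper name; the precedence given to a per-channel `BaseURL` mirrors the `updateChannelBalance` change further below:

```go
// Hypothetical helper, sketching the convention from the hunk above: the
// channel type constant indexes the base-URL table, and an explicitly
// configured BaseURL on the channel takes precedence over the table default.
func defaultBaseURL(baseURLs []string, channelType int, configuredBaseURL string) string {
    if configuredBaseURL != "" {
        return configuredBaseURL
    }
    if channelType < 0 || channelType >= len(baseURLs) {
        return "" // unknown channel type: no default
    }
    return baseURLs[channelType] // e.g. ChannelBaseURLs[13] -> "https://api.aigc2d.com"
}
```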
```diff
@@ -31,7 +31,7 @@ var ModelRatio = map[string]float64{
     "curie":                   10,
     "babbage":                 10,
     "ada":                     10,
-    "text-embedding-ada-002":  0.2,
+    "text-embedding-ada-002":  0.05,
     "text-search-ada-doc-001": 10,
     "text-moderation-stable":  0.1,
     "text-moderation-latest":  0.1,
```
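Assuming the usual One API convention that a ratio of 1 corresponds to $0.002 per 1K tokens, the new value of 0.05 works out to $0.0001 per 1K tokens, which matches the list price of `text-embedding-ada-002`; the previous 0.2 billed embeddings at four times that rate.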
```diff
@@ -33,7 +33,7 @@ func GetSubscription(c *gin.Context) {
         amount /= common.QuotaPerUnit
     }
     if token != nil && token.UnlimitedQuota {
-        amount = 99999999.9999
+        amount = 100000000
     }
     subscription := OpenAISubscriptionResponse{
         Object:             "billing_subscription",
```
```diff
@@ -32,6 +32,13 @@ type OpenAIUsageDailyCost struct {
     }
 }
 
+type OpenAICreditGrants struct {
+    Object         string  `json:"object"`
+    TotalGranted   float64 `json:"total_granted"`
+    TotalUsed      float64 `json:"total_used"`
+    TotalAvailable float64 `json:"total_available"`
+}
+
 type OpenAIUsageResponse struct {
     Object string `json:"object"`
     //DailyCosts []OpenAIUsageDailyCost `json:"daily_costs"`
@@ -61,6 +68,14 @@ type API2GPTUsageResponse struct {
     TotalRemaining float64 `json:"total_remaining"`
 }
 
+type APGC2DGPTUsageResponse struct {
+    //Grants         interface{} `json:"grants"`
+    Object         string  `json:"object"`
+    TotalAvailable float64 `json:"total_available"`
+    TotalGranted   float64 `json:"total_granted"`
+    TotalUsed      float64 `json:"total_used"`
+}
+
 // GetAuthHeader get auth header
 func GetAuthHeader(token string) http.Header {
     h := http.Header{}
@@ -92,6 +107,22 @@ func GetResponseBody(method, url string, channel *model.Channel, headers http.He
     return body, nil
 }
 
+func updateChannelCloseAIBalance(channel *model.Channel) (float64, error) {
+    url := fmt.Sprintf("%s/dashboard/billing/credit_grants", channel.BaseURL)
+    body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+
+    if err != nil {
+        return 0, err
+    }
+    response := OpenAICreditGrants{}
+    err = json.Unmarshal(body, &response)
+    if err != nil {
+        return 0, err
+    }
+    channel.UpdateBalance(response.TotalAvailable)
+    return response.TotalAvailable, nil
+}
+
 func updateChannelOpenAISBBalance(channel *model.Channel) (float64, error) {
     url := fmt.Sprintf("https://api.openai-sb.com/sb-api/user/status?api_key=%s", channel.Key)
     body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
@@ -150,8 +181,26 @@ func updateChannelAPI2GPTBalance(channel *model.Channel) (float64, error) {
     return response.TotalRemaining, nil
 }
 
+func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
+    url := "https://api.aigc2d.com/dashboard/billing/credit_grants"
+    body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+    if err != nil {
+        return 0, err
+    }
+    response := APGC2DGPTUsageResponse{}
+    err = json.Unmarshal(body, &response)
+    if err != nil {
+        return 0, err
+    }
+    channel.UpdateBalance(response.TotalAvailable)
+    return response.TotalAvailable, nil
+}
+
 func updateChannelBalance(channel *model.Channel) (float64, error) {
     baseURL := common.ChannelBaseURLs[channel.Type]
+    if channel.BaseURL == "" {
+        channel.BaseURL = baseURL
+    }
     switch channel.Type {
     case common.ChannelTypeOpenAI:
         if channel.BaseURL != "" {
@@ -161,12 +210,16 @@ func updateChannelBalance(channel *model.Channel) (float64, error) {
         return 0, errors.New("尚未实现")
     case common.ChannelTypeCustom:
         baseURL = channel.BaseURL
+    case common.ChannelTypeCloseAI:
+        return updateChannelCloseAIBalance(channel)
     case common.ChannelTypeOpenAISB:
         return updateChannelOpenAISBBalance(channel)
     case common.ChannelTypeAIProxy:
         return updateChannelAIProxyBalance(channel)
     case common.ChannelTypeAPI2GPT:
         return updateChannelAPI2GPTBalance(channel)
+    case common.ChannelTypeAIGC2D:
+        return updateChannelAIGC2DBalance(channel)
     default:
         return 0, errors.New("尚未实现")
     }
```
```diff
@@ -224,6 +224,24 @@ func init() {
             Root:       "text-moderation-stable",
             Parent:     nil,
         },
+        {
+            Id:         "text-davinci-edit-001",
+            Object:     "model",
+            Created:    1677649963,
+            OwnedBy:    "openai",
+            Permission: permission,
+            Root:       "text-davinci-edit-001",
+            Parent:     nil,
+        },
+        {
+            Id:         "code-davinci-edit-001",
+            Object:     "model",
+            Created:    1677649963,
+            OwnedBy:    "openai",
+            Permission: permission,
+            Root:       "code-davinci-edit-001",
+            Parent:     nil,
+        },
     }
     openAIModelsMap = make(map[string]OpenAIModels)
     for _, model := range openAIModels {
```
```diff
@@ -27,7 +27,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
             return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
         }
     }
-    if relayMode == RelayModeModeration && textRequest.Model == "" {
+    if relayMode == RelayModeModerations && textRequest.Model == "" {
         textRequest.Model = "text-moderation-latest"
     }
     // request validation
@@ -37,16 +37,34 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
     switch relayMode {
     case RelayModeCompletions:
         if textRequest.Prompt == "" {
-            return errorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest)
+            return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
         }
     case RelayModeChatCompletions:
-        if len(textRequest.Messages) == 0 {
-            return errorWrapper(errors.New("messages is required"), "required_field_missing", http.StatusBadRequest)
+        if textRequest.Messages == nil || len(textRequest.Messages) == 0 {
+            return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
         }
     case RelayModeEmbeddings:
-    case RelayModeModeration:
+    case RelayModeModerations:
         if textRequest.Input == "" {
-            return errorWrapper(errors.New("input is required"), "required_field_missing", http.StatusBadRequest)
+            return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
+        }
+    case RelayModeEdits:
+        if textRequest.Instruction == "" {
+            return errorWrapper(errors.New("field instruction is required"), "required_field_missing", http.StatusBadRequest)
+        }
+    }
+    // map model name
+    modelMapping := c.GetString("model_mapping")
+    isModelMapped := false
+    if modelMapping != "" {
+        modelMap := make(map[string]string)
+        err := json.Unmarshal([]byte(modelMapping), &modelMap)
+        if err != nil {
+            return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
+        }
+        if modelMap[textRequest.Model] != "" {
+            textRequest.Model = modelMap[textRequest.Model]
+            isModelMapped = true
         }
     }
     baseURL := common.ChannelBaseURLs[channelType]
```
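The new mapping step reads the channel's `model_mapping` value (a JSON object stored on the channel and passed through the Distribute middleware shown later) and rewrites the requested model name before the request is forwarded. A minimal, runnable sketch of that behaviour, using a hypothetical mapping value:

```go
package main

import (
    "encoding/json"
    "fmt"
    "log"
)

func main() {
    // Hypothetical model_mapping value as it might be stored on a channel.
    modelMapping := `{"gpt-3.5-turbo": "gpt-3.5-turbo-0613", "gpt-4": "gpt-4-0613"}`

    modelMap := make(map[string]string)
    if err := json.Unmarshal([]byte(modelMapping), &modelMap); err != nil {
        log.Fatal(err) // the handler wraps this as unmarshal_model_mapping_failed
    }

    requestedModel := "gpt-4"
    if mapped := modelMap[requestedModel]; mapped != "" {
        requestedModel = mapped // the outgoing request is re-serialized with this name
    }
    fmt.Println(requestedModel) // prints "gpt-4-0613"
}
```

When a mapping applies, `isModelMapped` forces the request body to be re-marshaled instead of being streamed through unchanged, as the next hunks show.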
```diff
@@ -84,7 +102,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
         promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
     case RelayModeCompletions:
         promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
-    case RelayModeModeration:
+    case RelayModeModerations:
         promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
     }
     preConsumedTokens := common.PreConsumedQuota
@@ -110,7 +128,17 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
             return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
         }
     }
-    req, err := http.NewRequest(c.Request.Method, fullRequestURL, c.Request.Body)
+    var requestBody io.Reader
+    if isModelMapped {
+        jsonStr, err := json.Marshal(textRequest)
+        if err != nil {
+            return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
+        }
+        requestBody = bytes.NewBuffer(jsonStr)
+    } else {
+        requestBody = c.Request.Body
+    }
+    req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
     if err != nil {
         return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
     }
```
```diff
@@ -144,7 +172,10 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
     defer func() {
         if consumeQuota {
             quota := 0
-            completionRatio := 1.333333 // default for gpt-3
+            completionRatio := 1.0
+            if strings.HasPrefix(textRequest.Model, "gpt-3.5") {
+                completionRatio = 1.333333
+            }
             if strings.HasPrefix(textRequest.Model, "gpt-4") {
                 completionRatio = 2
             }
@@ -170,6 +201,10 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
             if err != nil {
                 common.SysError("error consuming token remain quota: " + err.Error())
             }
+            err = model.CacheUpdateUserQuota(userId)
+            if err != nil {
+                common.SysError("error update user quota cache: " + err.Error())
+            }
             if quota != 0 {
                 tokenName := c.GetString("token_name")
                 logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
```
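The default completion ratio drops from 1.333333 to 1.0, so only models explicitly matched by prefix keep a weighted completion price; everything else now bills completion tokens the same as prompt tokens. A sketch of the resulting selection (the helper name is hypothetical; in the diff the logic sits inline in the deferred quota calculation):

```go
// Hypothetical helper mirroring the ratio selection above.
func completionRatioFor(model string) float64 {
    if strings.HasPrefix(model, "gpt-3.5") {
        return 1.333333 // completion tokens weighted ~1.33x relative to prompt tokens
    }
    if strings.HasPrefix(model, "gpt-4") {
        return 2 // completion tokens weighted 2x relative to prompt tokens
    }
    return 1.0 // previously 1.333333 for every non-gpt-4 model
}
```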
```diff
@@ -4,7 +4,6 @@ import (
     "fmt"
     "github.com/pkoukk/tiktoken-go"
     "one-api/common"
-    "strings"
 )
 
 var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
@@ -34,12 +33,9 @@ func countTokenMessages(messages []Message, model string) int {
     // Every message follows <|start|>{role/name}\n{content}<|end|>\n
     var tokensPerMessage int
     var tokensPerName int
-    if strings.HasPrefix(model, "gpt-3.5") {
+    if model == "gpt-3.5-turbo-0301" {
         tokensPerMessage = 4
         tokensPerName = -1 // If there's a name, the role is omitted
-    } else if strings.HasPrefix(model, "gpt-4") {
-        tokensPerMessage = 3
-        tokensPerName = 1
     } else {
         tokensPerMessage = 3
         tokensPerName = 1
     }
```
```diff
@@ -19,22 +19,24 @@ const (
     RelayModeChatCompletions
     RelayModeCompletions
     RelayModeEmbeddings
-    RelayModeModeration
+    RelayModeModerations
     RelayModeImagesGenerations
+    RelayModeEdits
 )
 
 // https://platform.openai.com/docs/api-reference/chat
 
 type GeneralOpenAIRequest struct {
-    Model       string    `json:"model"`
-    Messages    []Message `json:"messages"`
-    Prompt      any       `json:"prompt"`
-    Stream      bool      `json:"stream"`
-    MaxTokens   int       `json:"max_tokens"`
-    Temperature float64   `json:"temperature"`
-    TopP        float64   `json:"top_p"`
-    N           int       `json:"n"`
-    Input       any       `json:"input"`
+    Model       string    `json:"model,omitempty"`
+    Messages    []Message `json:"messages,omitempty"`
+    Prompt      any       `json:"prompt,omitempty"`
+    Stream      bool      `json:"stream,omitempty"`
+    MaxTokens   int       `json:"max_tokens,omitempty"`
+    Temperature float64   `json:"temperature,omitempty"`
+    TopP        float64   `json:"top_p,omitempty"`
+    N           int       `json:"n,omitempty"`
+    Input       any       `json:"input,omitempty"`
+    Instruction string    `json:"instruction,omitempty"`
 }
 
 type ChatRequest struct {
```
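The `omitempty` tags matter because, when a model mapping applies, `relayTextHelper` now re-marshals the parsed `GeneralOpenAIRequest` instead of streaming the client's original body; without them, every field the client left out would be forwarded upstream as an explicit zero value (`"max_tokens":0`, `"temperature":0`, and so on). A self-contained illustration of the difference, using a trimmed-down struct rather than the real one:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down illustration of the change above: the same fields with and
// without omitempty on their JSON tags.
type withoutOmitempty struct {
    Model     string  `json:"model"`
    MaxTokens int     `json:"max_tokens"`
    TopP      float64 `json:"top_p"`
}

type withOmitempty struct {
    Model     string  `json:"model,omitempty"`
    MaxTokens int     `json:"max_tokens,omitempty"`
    TopP      float64 `json:"top_p,omitempty"`
}

func main() {
    a, _ := json.Marshal(withoutOmitempty{Model: "gpt-4"})
    b, _ := json.Marshal(withOmitempty{Model: "gpt-4"})
    fmt.Println(string(a)) // {"model":"gpt-4","max_tokens":0,"top_p":0}
    fmt.Println(string(b)) // {"model":"gpt-4"}
}
```

One side effect worth noting: a client that deliberately sends a zero value (for example `"temperature": 0`) will also have that field dropped on re-marshal.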
```diff
@@ -99,9 +101,11 @@ func Relay(c *gin.Context) {
     } else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
         relayMode = RelayModeEmbeddings
     } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
-        relayMode = RelayModeModeration
+        relayMode = RelayModeModerations
     } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
         relayMode = RelayModeImagesGenerations
+    } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
+        relayMode = RelayModeEdits
     }
     var err *OpenAIErrorWithStatusCode
     switch relayMode {
```
```diff
@@ -180,10 +180,10 @@ func UpdateToken(c *gin.Context) {
         return
     }
     if token.Status == common.TokenStatusEnabled {
-        if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= common.GetTimestamp() {
+        if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= common.GetTimestamp() && cleanToken.ExpiredTime != -1 {
             c.JSON(http.StatusOK, gin.H{
                 "success": false,
-                "message": "令牌已过期,无法启用,请先修改令牌过期时间",
+                "message": "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期",
             })
             return
         }
```
```diff
@@ -36,7 +36,7 @@
   "通过令牌「%s」使用模型 %s 消耗 %s(模型倍率 %.2f,分组倍率 %.2f)": "Using model %s with token %s consumes %s (model rate %.2f, group rate %.2f)",
   "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。": "The current group load is saturated, please try again later, or upgrade your account to improve service quality.",
   "令牌名称长度必须在1-20之间": "The length of the token name must be between 1-20",
-  "令牌已过期,无法启用,请先修改令牌过期时间": "The token has expired and cannot be enabled. Please modify the token expiration time first",
+  "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期": "The token has expired and cannot be enabled. Please modify the expiration time of the token, or set it to never expire.",
   "令牌可用额度已用尽,无法启用,请先修改令牌剩余额度,或者设置为无限额度": "The available quota of the token has been used up and cannot be enabled. Please modify the remaining quota of the token, or set it to unlimited quota",
   "管理员关闭了密码登录": "The administrator has turned off password login",
   "无法保存会话信息,请重试": "Unable to save session information, please try again",
@@ -456,5 +456,7 @@
   "提示": "Prompt",
   "补全": "Completion",
   "消耗额度": "Used Quota",
-  "可选值": "Optional Values"
+  "可选值": "Optional Values",
+  "渠道不存在:%d": "Channel does not exist: %d",
+  "数据库一致性已被破坏,请联系管理员": "Database consistency has been broken, please contact the administrator"
 }
```

**main.go** (11 changed lines)
```diff
@@ -4,7 +4,6 @@ import (
     "embed"
     "github.com/gin-contrib/sessions"
     "github.com/gin-contrib/sessions/cookie"
-    "github.com/gin-contrib/sessions/redis"
     "github.com/gin-gonic/gin"
     "one-api/common"
     "one-api/controller"
@@ -82,14 +81,8 @@ func main() {
     server.Use(middleware.CORS())
 
     // Initialize session store
-    if common.RedisEnabled {
-        opt := common.ParseRedisOption()
-        store, _ := redis.NewStore(opt.MinIdleConns, opt.Network, opt.Addr, opt.Password, []byte(common.SessionSecret))
-        server.Use(sessions.Sessions("session", store))
-    } else {
-        store := cookie.NewStore([]byte(common.SessionSecret))
-        server.Use(sessions.Sessions("session", store))
-    }
+    store := cookie.NewStore([]byte(common.SessionSecret))
+    server.Use(sessions.Sessions("session", store))
 
     router.SetRouter(server, buildFS, indexPage)
     var port = os.Getenv("PORT")
```
```diff
@@ -75,9 +75,14 @@ func Distribute() func(c *gin.Context) {
             }
             channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
             if err != nil {
+                message := "无可用渠道"
+                if channel != nil {
+                    common.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
+                    message = "数据库一致性已被破坏,请联系管理员"
+                }
                 c.JSON(http.StatusServiceUnavailable, gin.H{
                     "error": gin.H{
-                        "message": "无可用渠道",
+                        "message": message,
                         "type":    "one_api_error",
                     },
                 })
@@ -88,6 +93,7 @@ func Distribute() func(c *gin.Context) {
         c.Set("channel", channel.Type)
         c.Set("channel_id", channel.Id)
         c.Set("channel_name", channel.Name)
+        c.Set("model_mapping", channel.ModelMapping)
         c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
         c.Set("base_url", channel.BaseURL)
         if channel.Type == common.ChannelTypeAzure {
```
```diff
@@ -24,6 +24,7 @@ func GetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
         return nil, err
     }
     channel := Channel{}
+    channel.Id = ability.ChannelId
     err = DB.First(&channel, "id = ?", ability.ChannelId).Error
     return &channel, err
 }
```
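Taken together with the Distribute change above: assigning `channel.Id = ability.ChannelId` before the `DB.First` lookup means a failed lookup still returns a non-nil channel carrying the id the abilities table pointed at, which is what lets the middleware log `渠道不存在:%d` and surface the database-consistency message instead of the generic `无可用渠道` error.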
```diff
@@ -83,6 +83,18 @@ func CacheGetUserQuota(id int) (quota int, err error) {
     return quota, err
 }
 
+func CacheUpdateUserQuota(id int) error {
+    if !common.RedisEnabled {
+        return nil
+    }
+    quota, err := GetUserQuota(id)
+    if err != nil {
+        return err
+    }
+    err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), UserId2QuotaCacheSeconds*time.Second)
+    return err
+}
+
 func CacheIsUserEnabled(userId int) bool {
     if !common.RedisEnabled {
         return IsUserEnabled(userId)
@@ -108,7 +120,7 @@ var channelSyncLock sync.RWMutex
 func InitChannelCache() {
     newChannelId2channel := make(map[int]*Channel)
     var channels []*Channel
-    DB.Find(&channels)
+    DB.Where("status = ?", common.ChannelStatusEnabled).Find(&channels)
     for _, channel := range channels {
         newChannelId2channel[channel.Id] = channel
     }
```
```diff
@@ -22,6 +22,7 @@ type Channel struct {
     Models             string  `json:"models"`
     Group              string  `json:"group" gorm:"type:varchar(32);default:'default'"`
     UsedQuota          int64   `json:"used_quota" gorm:"bigint;default:0"`
+    ModelMapping       string  `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
 }
 
 func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
@@ -36,7 +37,7 @@ func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
 }
 
 func SearchChannels(keyword string) (channels []*Channel, err error) {
-    err = DB.Omit("key").Where("id = ? or name LIKE ? or key = ?", keyword, keyword+"%", keyword).Find(&channels).Error
+    err = DB.Omit("key").Where("id = ? or name LIKE ? or `key` = ?", keyword, keyword+"%", keyword).Find(&channels).Error
     return channels, err
 }
 
```
```diff
@@ -5,6 +5,7 @@ import (
     "fmt"
     "github.com/gin-gonic/gin"
     "net/http"
+    "one-api/common"
     "os"
     "strings"
 )
@@ -14,6 +15,10 @@ func SetRouter(router *gin.Engine, buildFS embed.FS, indexPage []byte) {
     SetDashboardRouter(router)
     SetRelayRouter(router)
     frontendBaseUrl := os.Getenv("FRONTEND_BASE_URL")
+    if common.IsMasterNode && frontendBaseUrl != "" {
+        frontendBaseUrl = ""
+        common.SysLog("FRONTEND_BASE_URL is ignored on master node")
+    }
     if frontendBaseUrl == "" {
         SetWebRouter(router, buildFS, indexPage)
     } else {
```
```diff
@@ -19,7 +19,7 @@ func SetRelayRouter(router *gin.Engine) {
     {
         relayV1Router.POST("/completions", controller.Relay)
         relayV1Router.POST("/chat/completions", controller.Relay)
-        relayV1Router.POST("/edits", controller.RelayNotImplemented)
+        relayV1Router.POST("/edits", controller.Relay)
         relayV1Router.POST("/images/generations", controller.RelayNotImplemented)
         relayV1Router.POST("/images/edits", controller.RelayNotImplemented)
         relayV1Router.POST("/images/variations", controller.RelayNotImplemented)
```
| @@ -30,6 +30,9 @@ function renderType(type) { | |||||||
| function renderBalance(type, balance) { | function renderBalance(type, balance) { | ||||||
|   switch (type) { |   switch (type) { | ||||||
|     case 1: // OpenAI |     case 1: // OpenAI | ||||||
|  |       return <span>${balance.toFixed(2)}</span>; | ||||||
|  |     case 4: // CloseAI | ||||||
|  |       return <span>¥{balance.toFixed(2)}</span>; | ||||||
|     case 8: // 自定义 |     case 8: // 自定义 | ||||||
|       return <span>${balance.toFixed(2)}</span>; |       return <span>${balance.toFixed(2)}</span>; | ||||||
|     case 5: // OpenAI-SB |     case 5: // OpenAI-SB | ||||||
| @@ -38,6 +41,8 @@ function renderBalance(type, balance) { | |||||||
|       return <span>{renderNumber(balance)}</span>; |       return <span>{renderNumber(balance)}</span>; | ||||||
|     case 12: // API2GPT |     case 12: // API2GPT | ||||||
|       return <span>¥{balance.toFixed(2)}</span>; |       return <span>¥{balance.toFixed(2)}</span>; | ||||||
|  |     case 13: // AIGC2D | ||||||
|  |       return <span>{renderNumber(balance)}</span>; | ||||||
|     default: |     default: | ||||||
|       return <span>不支持</span>; |       return <span>不支持</span>; | ||||||
|   } |   } | ||||||
@@ -58,8 +63,8 @@ const ChannelsTable = () => {
       if (startIdx === 0) {
         setChannels(data);
       } else {
-        let newChannels = channels;
-        newChannels.push(...data);
+        let newChannels = [...channels];
+        newChannels.splice(startIdx * ITEMS_PER_PAGE, data.length, ...data);
         setChannels(newChannels);
       }
     } else {
@@ -80,7 +85,7 @@ const ChannelsTable = () => {
 
   const refresh = async () => {
     setLoading(true);
-    await loadChannels(0);
+    await loadChannels(activePage - 1);
   };
 
   useEffect(() => {
@@ -238,7 +243,7 @@ const ChannelsTable = () => {
     if (channels.length === 0) return;
     setLoading(true);
     let sortedChannels = [...channels];
-    if (typeof sortedChannels[0][key] === 'string'){
+    if (typeof sortedChannels[0][key] === 'string') {
       sortedChannels.sort((a, b) => {
         return ('' + a[key]).localeCompare(b[key]);
       });
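The loadChannels change above (and the matching loadLogs/loadTokens changes below) stops blindly appending fetched rows and instead writes each fetched page into its own slot of the cached list, so refreshing the current page no longer duplicates entries. A minimal sketch of that merge step; the mergePage helper name is illustrative, not part of the codebase, and ITEMS_PER_PAGE is assumed to match the tables' page size:

// Copy the cached rows, then overwrite the slot belonging to page `startIdx`
// with the freshly fetched rows; splice grows the array if the page is new.
const ITEMS_PER_PAGE = 10; // assumed page size used by these tables

function mergePage(cache, startIdx, data) {
  const merged = [...cache]; // new array so React detects the state update
  merged.splice(startIdx * ITEMS_PER_PAGE, data.length, ...data);
  return merged;
}

// e.g. after refetching page 3 (startIdx = 2):
// setChannels(mergePage(channels, 2, freshRows));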
@@ -108,7 +108,7 @@ const LogsTable = () => {
         setLogs(data);
       } else {
         let newLogs = [...logs];
-        newLogs.push(...data);
+        newLogs.splice(startIdx * ITEMS_PER_PAGE, data.length, ...data);
         setLogs(newLogs);
       }
     } else {
@@ -74,9 +74,6 @@ const OperationSetting = () => {
   const submitConfig = async (group) => {
     switch (group) {
       case 'monitor':
-        if (originInputs['AutomaticDisableChannelEnabled'] !== inputs.AutomaticDisableChannelEnabled) {
-          await updateOption('AutomaticDisableChannelEnabled', inputs.AutomaticDisableChannelEnabled);
-        }
         if (originInputs['ChannelDisableThreshold'] !== inputs.ChannelDisableThreshold) {
           await updateOption('ChannelDisableThreshold', inputs.ChannelDisableThreshold);
         }
@@ -45,8 +45,8 @@ const TokensTable = () => {
       if (startIdx === 0) {
         setTokens(data);
       } else {
-        let newTokens = tokens;
-        newTokens.push(...data);
+        let newTokens = [...tokens];
+        newTokens.splice(startIdx * ITEMS_PER_PAGE, data.length, ...data);
         setTokens(newTokens);
       }
     } else {
@@ -67,7 +67,7 @@ const TokensTable = () => {
 
   const refresh = async () => {
     setLoading(true);
-    await loadTokens(0);
+    await loadTokens(activePage - 1);
   }
 
   useEffect(() => {
@@ -183,14 +183,6 @@ const UsersTable = () => {
             >
               分组
             </Table.HeaderCell>
-            <Table.HeaderCell
-              style={{ cursor: 'pointer' }}
-              onClick={() => {
-                sortUser('email');
-              }}
-            >
-              邮箱地址
-            </Table.HeaderCell>
             <Table.HeaderCell
               style={{ cursor: 'pointer' }}
               onClick={() => {
@@ -233,20 +225,19 @@ const UsersTable = () => {
                   <Table.Cell>
                     <Popup
                       content={user.email ? user.email : '未绑定邮箱地址'}
-                      key={user.display_name}
-                      header={user.display_name ? user.display_name : user.username}
+                      key={user.username}
                       trigger={<span>{renderText(user.username, 10)}</span>}
                       hoverable
                     />
                   </Table.Cell>
                   <Table.Cell>{renderGroup(user.group)}</Table.Cell>
+                  {/*<Table.Cell>*/}
+                  {/*  {user.email ? <Popup hoverable content={user.email} trigger={<span>{renderText(user.email, 24)}</span>} /> : '无'}*/}
+                  {/*</Table.Cell>*/}
                   <Table.Cell>
-                    {user.email ? <Popup hoverable content={user.email} trigger={<span>{renderText(user.email, 24)}</span>} /> : '无'}
-                  </Table.Cell>
-                  <Table.Cell>
-                    <Popup content='剩余额度' trigger={<Label>{renderQuota(user.quota)}</Label>} />
-                    <Popup content='已用额度' trigger={<Label>{renderQuota(user.used_quota)}</Label>} />
-                    <Popup content='请求次数' trigger={<Label>{renderNumber(user.request_count)}</Label>} />
+                    <Popup content='剩余额度' trigger={<Label basic>{renderQuota(user.quota)}</Label>} />
+                    <Popup content='已用额度' trigger={<Label basic>{renderQuota(user.used_quota)}</Label>} />
+                    <Popup content='请求次数' trigger={<Label basic>{renderNumber(user.request_count)}</Label>} />
                   </Table.Cell>
                   <Table.Cell>{renderRole(user.role)}</Table.Cell>
                   <Table.Cell>{renderStatus(user.status)}</Table.Cell>
@@ -320,7 +311,7 @@ const UsersTable = () => {
 
         <Table.Footer>
           <Table.Row>
-            <Table.HeaderCell colSpan='8'>
+            <Table.HeaderCell colSpan='7'>
               <Button size='small' as={Link} to='/user/add' loading={loading}>
                 添加新的用户
               </Button>
@@ -9,5 +9,6 @@ export const CHANNEL_OPTIONS = [
   { key: 7, text: 'OhMyGPT', value: 7, color: 'purple' },
   { key: 9, text: 'AI.LS', value: 9, color: 'yellow' },
   { key: 10, text: 'AI Proxy', value: 10, color: 'purple' },
-  { key: 12, text: 'API2GPT', value: 12, color: 'blue' }
-];
+  { key: 12, text: 'API2GPT', value: 12, color: 'blue' },
+  { key: 13, text: 'AIGC2D', value: 13, color: 'purple' }
+];
@@ -1,9 +1,15 @@
 import React, { useEffect, useState } from 'react';
 import { Button, Form, Header, Message, Segment } from 'semantic-ui-react';
 import { useParams } from 'react-router-dom';
-import { API, showError, showInfo, showSuccess } from '../../helpers';
+import { API, showError, showInfo, showSuccess, verifyJSON } from '../../helpers';
 import { CHANNEL_OPTIONS } from '../../constants';
 
+const MODEL_MAPPING_EXAMPLE = {
+  'gpt-3.5-turbo-0301': 'gpt-3.5-turbo',
+  'gpt-4-0314': 'gpt-4',
+  'gpt-4-32k-0314': 'gpt-4-32k'
+};
+
 const EditChannel = () => {
   const params = useParams();
   const channelId = params.id;
@@ -15,6 +21,7 @@ const EditChannel = () => {
     key: '',
     base_url: '',
     other: '',
+    model_mapping: '',
     models: [],
     groups: ['default']
   };
@@ -42,6 +49,9 @@ const EditChannel = () => {
       } else {
         data.groups = data.group.split(',');
       }
+      if (data.model_mapping !== '') {
+        data.model_mapping = JSON.stringify(JSON.parse(data.model_mapping), null, 2);
+      }
       setInputs(data);
     } else {
       showError(message);
@@ -94,6 +104,10 @@ const EditChannel = () => {
       showInfo('请至少选择一个模型!');
       return;
     }
+    if (inputs.model_mapping !== '' && !verifyJSON(inputs.model_mapping)) {
+      showInfo('模型映射必须是合法的 JSON 格式!');
+      return;
+    }
     let localInputs = inputs;
     if (localInputs.base_url.endsWith('/')) {
       localInputs.base_url = localInputs.base_url.slice(0, localInputs.base_url.length - 1);
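verifyJSON is imported from '../../helpers' but its body is not part of this excerpt; a typical implementation of such a check (an assumption here, shown only to make the validation step concrete) wraps JSON.parse in a try/catch:

// Assumed helper: returns true only when the string parses as valid JSON.
export function verifyJSON(str) {
  try {
    JSON.parse(str);
  } catch (e) {
    return false;
  }
  return true;
}

The loader hunk above uses the same round trip in reverse, JSON.stringify(JSON.parse(data.model_mapping), null, 2), to pretty-print a stored mapping before it is shown in the editor.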
@@ -131,6 +145,7 @@ const EditChannel = () => {
             <Form.Select
               label='类型'
               name='type'
+              required
               options={CHANNEL_OPTIONS}
               value={inputs.type}
               onChange={handleInputChange}
@@ -187,7 +202,7 @@ const EditChannel = () => {
                 <Form.Input
                   label='镜像'
                   name='base_url'
-                  placeholder={'请输入镜像站地址,格式为:https://domain.com,可不填,不填则使用渠道默认值'}
+                  placeholder={'此项可选,输入镜像站地址,格式为:https://domain.com'}
                   onChange={handleInputChange}
                   value={inputs.base_url}
                   autoComplete='new-password'
@@ -198,6 +213,7 @@ const EditChannel = () => {
           <Form.Field>
             <Form.Input
               label='名称'
+              required
               name='name'
               placeholder={'请输入名称'}
               onChange={handleInputChange}
@@ -210,6 +226,7 @@ const EditChannel = () => {
               label='分组'
               placeholder={'请选择分组'}
               name='groups'
+              required
               fluid
               multiple
               selection
@@ -226,6 +243,7 @@ const EditChannel = () => {
               label='模型'
               placeholder={'请选择该通道所支持的模型'}
               name='models'
+              required
               fluid
               multiple
               selection
@@ -246,11 +264,23 @@ const EditChannel = () => {
               handleInputChange(null, { name: 'models', value: [] });
             }}>清除所有模型</Button>
           </div>
+          <Form.Field>
+            <Form.TextArea
+              label='模型映射'
+              placeholder={`此项可选,为一个 JSON 文本,键为用户请求的模型名称,值为要替换的模型名称,例如:\n${JSON.stringify(MODEL_MAPPING_EXAMPLE, null, 2)}`}
+              name='model_mapping'
+              onChange={handleInputChange}
+              value={inputs.model_mapping}
+              style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
+              autoComplete='new-password'
+            />
+          </Form.Field>
           {
             batch ? <Form.Field>
               <Form.TextArea
                 label='密钥'
                 name='key'
+                required
                 placeholder={'请输入密钥,一行一个'}
                 onChange={handleInputChange}
                 value={inputs.key}
@@ -261,6 +291,7 @@ const EditChannel = () => {
               <Form.Input
                 label='密钥'
                 name='key'
+                required
                 placeholder={'请输入密钥'}
                 onChange={handleInputChange}
                 value={inputs.key}
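Per the new field's placeholder text, model_mapping is a JSON object whose keys are the model names clients request and whose values are the names actually forwarded upstream. A small illustrative lookup (not the server-side relay code, which is outside this excerpt):

// Illustrative only: rewrite a requested model name using a channel's mapping.
const modelMapping = {
  'gpt-3.5-turbo-0301': 'gpt-3.5-turbo',
  'gpt-4-0314': 'gpt-4',
  'gpt-4-32k-0314': 'gpt-4-32k'
};

function resolveModel(requestedModel, mapping) {
  return mapping[requestedModel] || requestedModel; // fall back to the original name
}

console.log(resolveModel('gpt-4-0314', modelMapping)); // -> 'gpt-4'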
@@ -11,7 +11,7 @@ const EditToken = () => {
   const [loading, setLoading] = useState(isEdit);
   const originInputs = {
     name: '',
-    remain_quota: 0,
+    remain_quota: isEdit ? 0 : 500000,
     expired_time: -1,
     unlimited_quota: false
   };
@@ -2,6 +2,7 @@ import React, { useEffect, useState } from 'react';
 import { Button, Form, Header, Segment } from 'semantic-ui-react';
 import { useParams } from 'react-router-dom';
 import { API, showError, showSuccess } from '../../helpers';
+import { renderQuota, renderQuotaWithPrompt } from '../../helpers/render';
 
 const EditUser = () => {
   const params = useParams();
@@ -134,7 +135,7 @@ const EditUser = () => {
               </Form.Field>
               <Form.Field>
                 <Form.Input
-                  label='剩余额度'
+                  label={`剩余额度${renderQuotaWithPrompt(quota)}`}
                   name='quota'
                   placeholder={'请输入新的剩余额度'}
                   onChange={handleInputChange}