Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-04 07:43:41 +08:00)

Compare commits: 78 commits, v0.5.12-al ... v0.6.2-alp
| SHA1 |
|---|
| 7cd57f3125 |
| 66efabd5ae |
| 8ede66a896 |
| b169173860 |
| f33555ae78 |
| c28ec10795 |
| e3767cbb07 |
| be9eb59fbb |
| 89e111ac69 |
| 2dcef85285 |
| 79d0cd378a |
| e99150bdb9 |
| a72e5fcc9e |
| 0710f8cd66 |
| 49cad7d4a5 |
| a90161cf00 |
| a45fc7d736 |
| 45940dcb12 |
| 969042b001 |
| 7e7369dbc4 |
| e54e647170 |
| 358920c858 |
| 1ea598c773 |
| 796be42487 |
| 5b50eb94e5 |
| 71c61365eb |
| b09f979b80 |
| 12440874b0 |
| 6ebc99460e |
| 27ad8bfb98 |
| 8388aa537f |
| 2346bf70af |
| f05b403ca5 |
| b33616df44 |
| cf16f44970 |
| bf2e26a48f |
| 4fb22ad4ce |
| 95cfb8e8c9 |
| c6ace985c2 |
| 10a926b8f3 |
| 2df877a352 |
| 9d8967f7d3 |
| b35f3523d3 |
| 82e916b5ff |
| de18d6fe16 |
| 1d0b7fb5ae |
| f9490bb72e |
| 76467285e8 |
| df1fd9aa81 |
| 614c2e0442 |
| eac6a0b9aa |
| b747cdbc6f |
| 6b27d6659a |
| dc5b781191 |
| c880b4a9a3 |
| 565ea58e68 |
| f141a37a9e |
| 5b78886ad3 |
| 87c7c4f0e6 |
| 4c4a873890 |
| 0664bdfda1 |
| 32387d9c20 |
| bd888f2eb7 |
| cece77e533 |
| 2a5468e23c |
| d0e415893b |
| 6cf5ce9a7a |
| f598b9df87 |
| 532c50d212 |
| 2acc2f5017 |
| 604ac56305 |
| 9383b638a6 |
| 28d512a675 |
| de9a58ca0b |
| 1aa374ccfb |
| d548a01c59 |
| 2cd1a78203 |
| b9d3cb0c45 |
**.github/workflows/docker-image-amd64-en.yml** (vendored, 7 changes)

```diff
@@ -20,6 +20,13 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v3
 
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
+
       - name: Save version info
         run: |
           git describe --tags > VERSION
```
**.github/workflows/docker-image-amd64.yml** (vendored, 7 changes)

```diff
@@ -20,6 +20,13 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v3
 
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
+
       - name: Save version info
         run: |
           git describe --tags > VERSION
```
**.github/workflows/docker-image-arm64.yml** (vendored, 7 changes)

```diff
@@ -21,6 +21,13 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v3
 
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
+
       - name: Save version info
         run: |
           git describe --tags > VERSION
```
**.github/workflows/linux-release.yml** (vendored, 10 changes)

```diff
@@ -20,10 +20,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -38,7 +44,7 @@ jobs:
       - name: Build Backend (amd64)
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
 
       - name: Build Backend (arm64)
         run: |
```
**.github/workflows/macos-release.yml** (vendored, 10 changes)

```diff
@@ -20,10 +20,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -38,7 +44,7 @@ jobs:
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-X 'one-api/common.Version=$(git describe --tags)'" -o one-api-macos
+          go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api-macos
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
```
**.github/workflows/windows-release.yml** (vendored, 10 changes)

```diff
@@ -23,10 +23,16 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Check repository URL
+        run: |
+          REPO_URL=$(git config --get remote.origin.url)
+          if [[ $REPO_URL == *"pro" ]]; then
+            exit 1
+          fi
       - uses: actions/setup-node@v3
         with:
           node-version: 16
-      - name: Build Frontend (theme default)
+      - name: Build Frontend
         env:
           CI: ""
         run: |
@@ -41,7 +47,7 @@ jobs:
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)'" -o one-api.exe
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api.exe
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
```
**.gitignore** (vendored, 3 changes)

```diff
@@ -6,4 +6,5 @@ upload
 build
 *.db-journal
 logs
 data
+/web/node_modules
```

**Dockerfile**

```diff
@@ -23,7 +23,7 @@ ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
 COPY --from=builder /web/build ./web/build
-RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
+RUN go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
 
 FROM alpine
```
**README.md** (12 changes)

```diff
@@ -67,12 +67,20 @@ _✨ Access all major models through the standard OpenAI API format, out of the box_
    + [x] [OpenAI ChatGPT series models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (supports [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
    + [x] [Anthropic Claude series models](https://anthropic.com)
    + [x] [Google PaLM2/Gemini series models](https://developers.generativeai.google)
+   + [x] [Mistral series models](https://mistral.ai/)
    + [x] [Baidu Wenxin Yiyan series models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
    + [x] [Alibaba Tongyi Qianwen series models](https://help.aliyun.com/document_detail/2400395.html)
    + [x] [iFlytek Spark cognitive models](https://www.xfyun.cn/doc/spark/Web.html)
    + [x] [Zhipu ChatGLM series models](https://bigmodel.cn)
    + [x] [360 Zhinao](https://ai.360.cn)
    + [x] [Tencent Hunyuan](https://cloud.tencent.com/document/product/1729)
+   + [x] [Moonshot AI](https://platform.moonshot.cn/)
+   + [x] [Baichuan models](https://platform.baichuan-ai.com)
+   + [ ] [ByteDance Yunque models](https://www.volcengine.com/product/ark) (WIP)
+   + [x] [MINIMAX](https://api.minimax.chat/)
+   + [x] [Groq](https://wow.groq.com/)
+   + [x] [Ollama](https://github.com/ollama/ollama)
+   + [x] [LingYiWanWu (01.AI)](https://platform.lingyiwanwu.com/)
 2. Supports configuring mirrors and many [third-party proxy services](https://iamazing.cn/page/openai-api-third-party-services).
 3. Supports accessing multiple channels via **load balancing**.
 4. Supports **stream mode**, enabling a typewriter effect through streaming responses.
@@ -100,6 +108,7 @@ _✨ Access all major models through the standard OpenAI API format, out of the box_
     + [GitHub OAuth](https://github.com/settings/applications/new).
     + WeChat official-account login (requires an additional [WeChat Server](https://github.com/songquanpeng/wechat-server) deployment).
 23. Supports theme switching via the `THEME` environment variable, default `default`; PRs for more themes are welcome, see [here](./web/README.md).
+24. Works with [Message Pusher](https://github.com/songquanpeng/message-pusher) to push alert messages to many apps.
 
 ## Deployment
 ### Docker-based deployment
@@ -369,6 +378,9 @@ graph LR
 16. `SQLITE_BUSY_TIMEOUT`: SQLite lock wait timeout in milliseconds, default `3000`.
 17. `GEMINI_SAFETY_SETTING`: Gemini safety setting, default `BLOCK_NONE`.
 18. `THEME`: system theme, default `default`; see [here](./web/README.md) for available values.
+19. `ENABLE_METRIC`: whether to disable channels based on request success rate; disabled by default, valid values are `true` and `false`.
+20. `METRIC_QUEUE_SIZE`: queue size for request success-rate statistics, default `10`.
+21. `METRIC_SUCCESS_RATE_THRESHOLD`: request success-rate threshold, default `0.8`.
 
 ### Command-line arguments
 1. `--port <port_number>`: specifies the port the server listens on, default `3000`.
```
**common/blacklist/main.go** (new file, 29 lines)

```diff
@@ -0,0 +1,29 @@
+package blacklist
+
+import (
+	"fmt"
+	"sync"
+)
+
+var blackList sync.Map
+
+func init() {
+	blackList = sync.Map{}
+}
+
+func userId2Key(id int) string {
+	return fmt.Sprintf("userid_%d", id)
+}
+
+func BanUser(id int) {
+	blackList.Store(userId2Key(id), true)
+}
+
+func UnbanUser(id int) {
+	blackList.Delete(userId2Key(id))
+}
+
+func IsUserBanned(id int) bool {
+	_, ok := blackList.Load(userId2Key(id))
+	return ok
+}
```
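A minimal sketch of how the new blacklist package is meant to be used (usage inferred from the exported functions above; `sync.Map` makes the calls safe from concurrent request handlers):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/blacklist"
)

func main() {
	userID := 42 // hypothetical user id
	blacklist.BanUser(userID)
	fmt.Println(blacklist.IsUserBanned(userID)) // true

	blacklist.UnbanUser(userID)
	fmt.Println(blacklist.IsUserBanned(userID)) // false
}
```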
**common/config/config.go**

```diff
@@ -1,7 +1,7 @@
 package config
 
 import (
-	"one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/env"
 	"os"
 	"strconv"
 	"sync"
@@ -52,6 +52,7 @@ var EmailDomainWhitelist = []string{
 }
 
 var DebugEnabled = os.Getenv("DEBUG") == "true"
+var DebugSQLEnabled = os.Getenv("DEBUG_SQL") == "true"
 var MemoryCacheEnabled = os.Getenv("MEMORY_CACHE_ENABLED") == "true"
 
 var LogConsumeEnabled = true
@@ -69,17 +70,20 @@ var WeChatServerAddress = ""
 var WeChatServerToken = ""
 var WeChatAccountQRCodeImageURL = ""
 
+var MessagePusherAddress = ""
+var MessagePusherToken = ""
+
 var TurnstileSiteKey = ""
 var TurnstileSecretKey = ""
 
-var QuotaForNewUser = 0
-var QuotaForInviter = 0
-var QuotaForInvitee = 0
+var QuotaForNewUser int64 = 0
+var QuotaForInviter int64 = 0
+var QuotaForInvitee int64 = 0
 var ChannelDisableThreshold = 5.0
 var AutomaticDisableChannelEnabled = false
 var AutomaticEnableChannelEnabled = false
-var QuotaRemindThreshold = 1000
-var PreConsumedQuota = 500
+var QuotaRemindThreshold int64 = 1000
+var PreConsumedQuota int64 = 500
 var ApproximateTokenEnabled = false
 var RetryTimes = 0
@@ -90,16 +94,16 @@ var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
 var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
 var RequestInterval = time.Duration(requestInterval) * time.Second
 
-var SyncFrequency = helper.GetOrDefaultEnvInt("SYNC_FREQUENCY", 10*60) // unit is second
+var SyncFrequency = env.Int("SYNC_FREQUENCY", 10*60) // unit is second
 
 var BatchUpdateEnabled = false
-var BatchUpdateInterval = helper.GetOrDefaultEnvInt("BATCH_UPDATE_INTERVAL", 5)
+var BatchUpdateInterval = env.Int("BATCH_UPDATE_INTERVAL", 5)
 
-var RelayTimeout = helper.GetOrDefaultEnvInt("RELAY_TIMEOUT", 0) // unit is second
+var RelayTimeout = env.Int("RELAY_TIMEOUT", 0) // unit is second
 
-var GeminiSafetySetting = helper.GetOrDefaultEnvString("GEMINI_SAFETY_SETTING", "BLOCK_NONE")
+var GeminiSafetySetting = env.String("GEMINI_SAFETY_SETTING", "BLOCK_NONE")
 
-var Theme = helper.GetOrDefaultEnvString("THEME", "default")
+var Theme = env.String("THEME", "default")
 var ValidThemes = map[string]bool{
 	"default": true,
 	"berry":   true,
@@ -108,10 +112,10 @@ var ValidThemes = map[string]bool{
 // All duration's unit is seconds
 // Shouldn't larger then RateLimitKeyExpirationDuration
 var (
-	GlobalApiRateLimitNum            = helper.GetOrDefaultEnvInt("GLOBAL_API_RATE_LIMIT", 180)
+	GlobalApiRateLimitNum            = env.Int("GLOBAL_API_RATE_LIMIT", 180)
 	GlobalApiRateLimitDuration int64 = 3 * 60
 
-	GlobalWebRateLimitNum            = helper.GetOrDefaultEnvInt("GLOBAL_WEB_RATE_LIMIT", 60)
+	GlobalWebRateLimitNum            = env.Int("GLOBAL_WEB_RATE_LIMIT", 60)
 	GlobalWebRateLimitDuration int64 = 3 * 60
 
 	UploadRateLimitNum            = 10
@@ -125,3 +129,9 @@ var (
 )
 
 var RateLimitKeyExpirationDuration = 20 * time.Minute
+
+var EnableMetric = env.Bool("ENABLE_METRIC", false)
+var MetricQueueSize = env.Int("METRIC_QUEUE_SIZE", 10)
+var MetricSuccessRateThreshold = env.Float64("METRIC_SUCCESS_RATE_THRESHOLD", 0.8)
+var MetricSuccessChanSize = env.Int("METRIC_SUCCESS_CHAN_SIZE", 1024)
+var MetricFailChanSize = env.Int("METRIC_FAIL_CHAN_SIZE", 128)
```
**common/constants.go**

```diff
@@ -15,6 +15,7 @@ const (
 const (
 	UserStatusEnabled  = 1 // don't use 0, 0 is the default value!
 	UserStatusDisabled = 2 // also don't use 0
+	UserStatusDeleted  = 3
 )
 
 const (
@@ -38,31 +39,40 @@ const (
 )
 
 const (
-	ChannelTypeUnknown        = 0
-	ChannelTypeOpenAI         = 1
-	ChannelTypeAPI2D          = 2
-	ChannelTypeAzure          = 3
-	ChannelTypeCloseAI        = 4
-	ChannelTypeOpenAISB       = 5
-	ChannelTypeOpenAIMax      = 6
-	ChannelTypeOhMyGPT        = 7
-	ChannelTypeCustom         = 8
-	ChannelTypeAILS           = 9
-	ChannelTypeAIProxy        = 10
-	ChannelTypePaLM           = 11
-	ChannelTypeAPI2GPT        = 12
-	ChannelTypeAIGC2D         = 13
-	ChannelTypeAnthropic      = 14
-	ChannelTypeBaidu          = 15
-	ChannelTypeZhipu          = 16
-	ChannelTypeAli            = 17
-	ChannelTypeXunfei         = 18
-	ChannelType360            = 19
-	ChannelTypeOpenRouter     = 20
-	ChannelTypeAIProxyLibrary = 21
-	ChannelTypeFastGPT        = 22
-	ChannelTypeTencent        = 23
-	ChannelTypeGemini         = 24
+	ChannelTypeUnknown = iota
+	ChannelTypeOpenAI
+	ChannelTypeAPI2D
+	ChannelTypeAzure
+	ChannelTypeCloseAI
+	ChannelTypeOpenAISB
+	ChannelTypeOpenAIMax
+	ChannelTypeOhMyGPT
+	ChannelTypeCustom
+	ChannelTypeAILS
+	ChannelTypeAIProxy
+	ChannelTypePaLM
+	ChannelTypeAPI2GPT
+	ChannelTypeAIGC2D
+	ChannelTypeAnthropic
+	ChannelTypeBaidu
+	ChannelTypeZhipu
+	ChannelTypeAli
+	ChannelTypeXunfei
+	ChannelType360
+	ChannelTypeOpenRouter
+	ChannelTypeAIProxyLibrary
+	ChannelTypeFastGPT
+	ChannelTypeTencent
+	ChannelTypeGemini
+	ChannelTypeMoonshot
+	ChannelTypeBaichuan
+	ChannelTypeMinimax
+	ChannelTypeMistral
+	ChannelTypeGroq
+	ChannelTypeOllama
+	ChannelTypeLingYiWanWu
+
+	ChannelTypeDummy
 )
 
 var ChannelBaseURLs = []string{
@@ -91,4 +101,19 @@ var ChannelBaseURLs = []string{
 	"https://fastgpt.run/api/openapi",           // 22
 	"https://hunyuan.cloud.tencent.com",         // 23
 	"https://generativelanguage.googleapis.com", // 24
+	"https://api.moonshot.cn",                   // 25
+	"https://api.baichuan-ai.com",               // 26
+	"https://api.minimax.chat",                  // 27
+	"https://api.mistral.ai",                    // 28
+	"https://api.groq.com/openai",               // 29
+	"http://localhost:11434",                    // 30
+	"https://api.lingyiwanwu.com",               // 31
 }
+
+const (
+	ConfigKeyPrefix = "cfg_"
+
+	ConfigKeyAPIVersion = ConfigKeyPrefix + "api_version"
+	ConfigKeyLibraryID  = ConfigKeyPrefix + "library_id"
+	ConfigKeyPlugin     = ConfigKeyPrefix + "plugin"
+)
```
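The switch to `iota` preserves the old numeric values only because the declaration order is unchanged, and that order is load-bearing: `ChannelBaseURLs` is indexed by channel type. A standalone sketch of the pattern (abbreviated, hypothetical table, not the repository's full list):

```go
package main

import "fmt"

// iota starts at 0 and increments once per line, so this block is
// equivalent to the old explicit "= 0, = 1, ..." assignments. Appending
// new types at the end keeps old values stable; inserting mid-list would
// silently shift every later value and desynchronize the URL table below.
const (
	ChannelTypeUnknown = iota // 0
	ChannelTypeOpenAI         // 1
	ChannelTypeAPI2D          // 2
)

var channelBaseURLs = []string{
	"",                       // 0: unknown, no base URL
	"https://api.openai.com", // 1
	"",                       // 2: placeholder for this sketch
}

func main() {
	fmt.Println(channelBaseURLs[ChannelTypeOpenAI]) // https://api.openai.com
}
```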
**common/database.go**

```diff
@@ -1,9 +1,12 @@
 package common
 
-import "one-api/common/helper"
+import (
+	"github.com/songquanpeng/one-api/common/env"
+)
 
 var UsingSQLite = false
 var UsingPostgreSQL = false
 var UsingMySQL = false
 
 var SQLitePath = "one-api.db"
-var SQLiteBusyTimeout = helper.GetOrDefaultEnvInt("SQLITE_BUSY_TIMEOUT", 3000)
+var SQLiteBusyTimeout = env.Int("SQLITE_BUSY_TIMEOUT", 3000)
```
```diff
@@ -15,10 +15,7 @@ type embedFileSystem struct {
 
 func (e embedFileSystem) Exists(prefix string, path string) bool {
 	_, err := e.Open(path)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }
 
 func EmbedFolder(fsEmbed embed.FS, targetPath string) static.ServeFileSystem {
```
**common/env/helper.go** (new file, vendored, 42 lines)

```diff
@@ -0,0 +1,42 @@
+package env
+
+import (
+	"os"
+	"strconv"
+)
+
+func Bool(env string, defaultValue bool) bool {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	return os.Getenv(env) == "true"
+}
+
+func Int(env string, defaultValue int) int {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	num, err := strconv.Atoi(os.Getenv(env))
+	if err != nil {
+		return defaultValue
+	}
+	return num
+}
+
+func Float64(env string, defaultValue float64) float64 {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	num, err := strconv.ParseFloat(os.Getenv(env), 64)
+	if err != nil {
+		return defaultValue
+	}
+	return num
+}
+
+func String(env string, defaultValue string) string {
+	if env == "" || os.Getenv(env) == "" {
+		return defaultValue
+	}
+	return os.Getenv(env)
+}
```
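A small sketch of how these helpers behave (import path taken from the module path used throughout this diff); every function falls back to the default when the variable is unset or unparsable:

```go
package main

import (
	"fmt"
	"os"

	"github.com/songquanpeng/one-api/common/env"
)

func main() {
	os.Setenv("SYNC_FREQUENCY", "120")
	fmt.Println(env.Int("SYNC_FREQUENCY", 600)) // 120
	fmt.Println(env.Int("MISSING_VAR", 600))    // 600: unset falls back to the default

	os.Setenv("BAD_FLOAT", "abc")
	fmt.Println(env.Float64("BAD_FLOAT", 0.8)) // 0.8: parse failure also falls back
}
```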
**common/gin.go**

```diff
@@ -8,12 +8,24 @@ import (
 	"strings"
 )
 
-func UnmarshalBodyReusable(c *gin.Context, v any) error {
+const KeyRequestBody = "key_request_body"
+
+func GetRequestBody(c *gin.Context) ([]byte, error) {
+	requestBody, _ := c.Get(KeyRequestBody)
+	if requestBody != nil {
+		return requestBody.([]byte), nil
+	}
 	requestBody, err := io.ReadAll(c.Request.Body)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	err = c.Request.Body.Close()
+	_ = c.Request.Body.Close()
+	c.Set(KeyRequestBody, requestBody)
+	return requestBody.([]byte), nil
+}
+
+func UnmarshalBodyReusable(c *gin.Context, v any) error {
+	requestBody, err := GetRequestBody(c)
+	if err != nil {
+		return err
+	}
```
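The reason for caching the body under a context key is that `c.Request.Body` is a one-shot stream: a second reader gets nothing. A standalone illustration of that underlying behavior with plain stdlib readers (not code from this compare):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	body := io.NopCloser(bytes.NewBufferString(`{"model":"gpt-3.5-turbo"}`))

	first, _ := io.ReadAll(body)
	second, _ := io.ReadAll(body) // the stream is already drained

	fmt.Printf("first: %d bytes, second: %d bytes\n", len(first), len(second))
	// Output: first: 25 bytes, second: 0 bytes
}
```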
**common/group-ratio.go**

```diff
@@ -2,7 +2,7 @@ package common
 
 import (
 	"encoding/json"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/logger"
 )
 
 var GroupRatio = map[string]float64{
```
**common/helper/helper.go**

```diff
@@ -7,8 +7,6 @@ import (
 	"log"
 	"math/rand"
 	"net"
-	"one-api/common/logger"
-	"os"
 	"os/exec"
 	"runtime"
 	"strconv"
@@ -107,13 +105,13 @@ func Seconds2Time(num int) (time string) {
 }
 
 func Interface2String(inter interface{}) string {
-	switch inter.(type) {
+	switch inter := inter.(type) {
 	case string:
-		return inter.(string)
+		return inter
 	case int:
-		return fmt.Sprintf("%d", inter.(int))
+		return fmt.Sprintf("%d", inter)
 	case float64:
-		return fmt.Sprintf("%f", inter.(float64))
+		return fmt.Sprintf("%f", inter)
 	}
 	return "Not Implemented"
 }
@@ -137,6 +135,7 @@ func GetUUID() string {
 }
 
 const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const keyNumbers = "0123456789"
 
 func init() {
 	rand.Seed(time.Now().UnixNano())
@@ -168,6 +167,15 @@ func GetRandomString(length int) string {
 	return string(key)
 }
 
+func GetRandomNumberString(length int) string {
+	rand.Seed(time.Now().UnixNano())
+	key := make([]byte, length)
+	for i := 0; i < length; i++ {
+		key[i] = keyNumbers[rand.Intn(len(keyNumbers))]
+	}
+	return string(key)
+}
+
 func GetTimestamp() int64 {
 	return time.Now().Unix()
 }
@@ -177,6 +185,10 @@ func GetTimeString() string {
 	return fmt.Sprintf("%s%d", now.Format("20060102150405"), now.UnixNano()%1e9)
 }
 
+func GenRequestID() string {
+	return GetTimeString() + GetRandomNumberString(8)
+}
+
 func Max(a int, b int) int {
 	if a >= b {
 		return a
@@ -185,25 +197,6 @@ func Max(a int, b int) int {
 	}
 }
 
-func GetOrDefaultEnvInt(env string, defaultValue int) int {
-	if env == "" || os.Getenv(env) == "" {
-		return defaultValue
-	}
-	num, err := strconv.Atoi(os.Getenv(env))
-	if err != nil {
-		logger.SysError(fmt.Sprintf("failed to parse %s: %s, using default value: %d", env, err.Error(), defaultValue))
-		return defaultValue
-	}
-	return num
-}
-
-func GetOrDefaultEnvString(env string, defaultValue string) string {
-	if env == "" || os.Getenv(env) == "" {
-		return defaultValue
-	}
-	return os.Getenv(env)
-}
-
 func AssignOrDefault(value string, defaultValue string) string {
 	if len(value) != 0 {
 		return value
```
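`GenRequestID` composes a sortable timestamp prefix with eight random digits to reduce collisions between requests created at the same instant. A self-contained sketch of the same construction (illustrative; the real helpers live in this package):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// genRequestID mirrors the new helper: a GetTimeString-style prefix
// (seconds plus the nanosecond remainder) followed by random digits.
func genRequestID() string {
	now := time.Now()
	prefix := fmt.Sprintf("%s%d", now.Format("20060102150405"), now.UnixNano()%1e9)
	digits := make([]byte, 8)
	for i := range digits {
		digits[i] = byte('0' + rand.Intn(10))
	}
	return prefix + string(digits)
}

func main() {
	fmt.Println(genRequestID())
}
```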
```diff
@@ -12,7 +12,7 @@ import (
 	"strings"
 	"testing"
 
-	img "one-api/common/image"
+	img "github.com/songquanpeng/one-api/common/image"
 
 	"github.com/stretchr/testify/assert"
 	_ "golang.org/x/image/webp"
```
**common/init.go**

```diff
@@ -3,9 +3,9 @@ package common
 import (
 	"flag"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"log"
-	"one-api/common/config"
-	"one-api/common/logger"
 	"os"
 	"path/filepath"
 )
```
```diff
@@ -4,6 +4,8 @@ import (
 	"context"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
 	"io"
 	"log"
 	"os"
@@ -13,14 +15,12 @@ import (
 )
 
 const (
+	loggerDEBUG = "DEBUG"
 	loggerINFO  = "INFO"
 	loggerWarn  = "WARN"
 	loggerError = "ERR"
 )
 
-const maxLogCount = 1000000
-
-var logCount int
 var setupLogLock sync.Mutex
 var setupLogWorking bool
@@ -55,6 +55,12 @@ func SysError(s string) {
 	_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
 }
 
+func Debug(ctx context.Context, msg string) {
+	if config.DebugEnabled {
+		logHelper(ctx, loggerDEBUG, msg)
+	}
+}
+
 func Info(ctx context.Context, msg string) {
 	logHelper(ctx, loggerINFO, msg)
 }
@@ -67,16 +73,20 @@ func Error(ctx context.Context, msg string) {
 	logHelper(ctx, loggerError, msg)
 }
 
+func Debugf(ctx context.Context, format string, a ...any) {
+	Debug(ctx, fmt.Sprintf(format, a...))
+}
+
 func Infof(ctx context.Context, format string, a ...any) {
-	Info(ctx, fmt.Sprintf(format, a))
+	Info(ctx, fmt.Sprintf(format, a...))
 }
 
 func Warnf(ctx context.Context, format string, a ...any) {
-	Warn(ctx, fmt.Sprintf(format, a))
+	Warn(ctx, fmt.Sprintf(format, a...))
 }
 
 func Errorf(ctx context.Context, format string, a ...any) {
-	Error(ctx, fmt.Sprintf(format, a))
+	Error(ctx, fmt.Sprintf(format, a...))
 }
 
 func logHelper(ctx context.Context, level string, msg string) {
@@ -85,11 +95,12 @@ func logHelper(ctx context.Context, level string, msg string) {
 		writer = gin.DefaultWriter
 	}
 	id := ctx.Value(RequestIdKey)
+	if id == nil {
+		id = helper.GenRequestID()
+	}
 	now := time.Now()
 	_, _ = fmt.Fprintf(writer, "[%s] %v | %s | %s \n", level, now.Format("2006/01/02 - 15:04:05"), id, msg)
-	logCount++ // we don't need accurate count, so no lock here
-	if logCount > maxLogCount && !setupLogWorking {
-		logCount = 0
+	if !setupLogWorking {
 		setupLogWorking = true
 		go func() {
 			SetupLogger()
```
```diff
@@ -1,17 +1,20 @@
-package common
+package message
 
 import (
 	"crypto/rand"
 	"crypto/tls"
 	"encoding/base64"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
 	"net/smtp"
-	"one-api/common/config"
 	"strings"
 	"time"
 )
 
 func SendEmail(subject string, receiver string, content string) error {
+	if receiver == "" {
+		return fmt.Errorf("receiver is empty")
+	}
 	if config.SMTPFrom == "" { // for compatibility
 		config.SMTPFrom = config.SMTPAccount
 	}
```
**common/message/main.go** (new file, 22 lines)

```diff
@@ -0,0 +1,22 @@
+package message
+
+import (
+	"fmt"
+	"github.com/songquanpeng/one-api/common/config"
+)
+
+const (
+	ByAll           = "all"
+	ByEmail         = "email"
+	ByMessagePusher = "message_pusher"
+)
+
+func Notify(by string, title string, description string, content string) error {
+	if by == ByEmail {
+		return SendEmail(title, config.RootUserEmail, content)
+	}
+	if by == ByMessagePusher {
+		return SendMessage(title, description, content)
+	}
+	return fmt.Errorf("unknown notify method: %s", by)
+}
```
**common/message/message-pusher.go** (new file, 53 lines)

```diff
@@ -0,0 +1,53 @@
+package message
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"github.com/songquanpeng/one-api/common/config"
+	"net/http"
+)
+
+type request struct {
+	Title       string `json:"title"`
+	Description string `json:"description"`
+	Content     string `json:"content"`
+	URL         string `json:"url"`
+	Channel     string `json:"channel"`
+	Token       string `json:"token"`
+}
+
+type response struct {
+	Success bool   `json:"success"`
+	Message string `json:"message"`
+}
+
+func SendMessage(title string, description string, content string) error {
+	if config.MessagePusherAddress == "" {
+		return errors.New("message pusher address is not set")
+	}
+	req := request{
+		Title:       title,
+		Description: description,
+		Content:     content,
+		Token:       config.MessagePusherToken,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	resp, err := http.Post(config.MessagePusherAddress,
+		"application/json", bytes.NewBuffer(data))
+	if err != nil {
+		return err
+	}
+	var res response
+	err = json.NewDecoder(resp.Body).Decode(&res)
+	if err != nil {
+		return err
+	}
+	if !res.Success {
+		return errors.New(res.Message)
+	}
+	return nil
+}
```
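A sketch of how the new notification entry point might be wired up (the address, token, and message texts here are hypothetical; in the running service these config values are populated elsewhere):

```go
package main

import (
	"log"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/message"
)

func main() {
	// Hypothetical Message Pusher endpoint and token for this sketch.
	config.MessagePusherAddress = "https://push.example.com"
	config.MessagePusherToken = "example-token"

	err := message.Notify(message.ByMessagePusher,
		"channel disabled",                      // title
		"channel #3 was disabled",               // description
		"success rate fell below the threshold") // content
	if err != nil {
		log.Println("notify failed:", err)
	}
}
```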
@@ -2,97 +2,92 @@ package common
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"one-api/common/logger"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/logger"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"time"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var DalleSizeRatios = map[string]map[string]float64{
 | 
			
		||||
	"dall-e-2": {
 | 
			
		||||
		"256x256":   1,
 | 
			
		||||
		"512x512":   1.125,
 | 
			
		||||
		"1024x1024": 1.25,
 | 
			
		||||
	},
 | 
			
		||||
	"dall-e-3": {
 | 
			
		||||
		"1024x1024": 1,
 | 
			
		||||
		"1024x1792": 2,
 | 
			
		||||
		"1792x1024": 2,
 | 
			
		||||
	},
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var DalleGenerationImageAmounts = map[string][2]int{
 | 
			
		||||
	"dall-e-2": {1, 10},
 | 
			
		||||
	"dall-e-3": {1, 1}, // OpenAI allows n=1 currently.
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var DalleImagePromptLengthLimitations = map[string]int{
 | 
			
		||||
	"dall-e-2": 1000,
 | 
			
		||||
	"dall-e-3": 4000,
 | 
			
		||||
}
 | 
			
		||||
const (
 | 
			
		||||
	USD2RMB = 7
 | 
			
		||||
	USD     = 500 // $0.002 = 1 -> $1 = 500
 | 
			
		||||
	RMB     = USD / USD2RMB
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ModelRatio
 | 
			
		||||
// https://platform.openai.com/docs/models/model-endpoint-compatibility
 | 
			
		||||
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
 | 
			
		||||
// https://openai.com/pricing
 | 
			
		||||
// TODO: when a new api is enabled, check the pricing here
 | 
			
		||||
// 1 === $0.002 / 1K tokens
 | 
			
		||||
// 1 === ¥0.014 / 1k tokens
 | 
			
		||||
var ModelRatio = map[string]float64{
 | 
			
		||||
	"gpt-4":                     15,
 | 
			
		||||
	"gpt-4-0314":                15,
 | 
			
		||||
	"gpt-4-0613":                15,
 | 
			
		||||
	"gpt-4-32k":                 30,
 | 
			
		||||
	"gpt-4-32k-0314":            30,
 | 
			
		||||
	"gpt-4-32k-0613":            30,
 | 
			
		||||
	"gpt-4-1106-preview":        5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-0125-preview":        5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-turbo-preview":       5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-vision-preview":      5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo":             0.75, // $0.0015 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-0301":        0.75,
 | 
			
		||||
	"gpt-3.5-turbo-0613":        0.75,
 | 
			
		||||
	"gpt-3.5-turbo-16k":         1.5, // $0.003 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-16k-0613":    1.5,
 | 
			
		||||
	"gpt-3.5-turbo-instruct":    0.75, // $0.0015 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-1106":        0.5,  // $0.001 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-0125":        0.25, // $0.0005 / 1K tokens
 | 
			
		||||
	"davinci-002":               1,    // $0.002 / 1K tokens
 | 
			
		||||
	"babbage-002":               0.2,  // $0.0004 / 1K tokens
 | 
			
		||||
	"text-ada-001":              0.2,
 | 
			
		||||
	"text-babbage-001":          0.25,
 | 
			
		||||
	"text-curie-001":            1,
 | 
			
		||||
	"text-davinci-002":          10,
 | 
			
		||||
	"text-davinci-003":          10,
 | 
			
		||||
	"text-davinci-edit-001":     10,
 | 
			
		||||
	"code-davinci-edit-001":     10,
 | 
			
		||||
	"whisper-1":                 15,  // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
 | 
			
		||||
	"tts-1":                     7.5, // $0.015 / 1K characters
 | 
			
		||||
	"tts-1-1106":                7.5,
 | 
			
		||||
	"tts-1-hd":                  15, // $0.030 / 1K characters
 | 
			
		||||
	"tts-1-hd-1106":             15,
 | 
			
		||||
	"davinci":                   10,
 | 
			
		||||
	"curie":                     10,
 | 
			
		||||
	"babbage":                   10,
 | 
			
		||||
	"ada":                       10,
 | 
			
		||||
	"text-embedding-ada-002":    0.05,
 | 
			
		||||
	"text-embedding-3-small":    0.01,
 | 
			
		||||
	"text-embedding-3-large":    0.065,
 | 
			
		||||
	"text-search-ada-doc-001":   10,
 | 
			
		||||
	"text-moderation-stable":    0.1,
 | 
			
		||||
	"text-moderation-latest":    0.1,
 | 
			
		||||
	"dall-e-2":                  8,      // $0.016 - $0.020 / image
 | 
			
		||||
	"dall-e-3":                  20,     // $0.040 - $0.120 / image
 | 
			
		||||
	"claude-instant-1":          0.815,  // $1.63 / 1M tokens
 | 
			
		||||
	"claude-2":                  5.51,   // $11.02 / 1M tokens
 | 
			
		||||
	"claude-2.0":                5.51,   // $11.02 / 1M tokens
 | 
			
		||||
	"claude-2.1":                5.51,   // $11.02 / 1M tokens
 | 
			
		||||
	"ERNIE-Bot":                 0.8572, // ¥0.012 / 1k tokens
 | 
			
		||||
	"ERNIE-Bot-turbo":           0.5715, // ¥0.008 / 1k tokens
 | 
			
		||||
	"ERNIE-Bot-4":               8.572,  // ¥0.12 / 1k tokens
 | 
			
		||||
	"Embedding-V1":              0.1429, // ¥0.002 / 1k tokens
 | 
			
		||||
	"PaLM-2":                    1,
 | 
			
		||||
	"gemini-pro":                1,      // $0.00025 / 1k characters -> $0.001 / 1k tokens
 | 
			
		||||
	"gemini-pro-vision":         1,      // $0.00025 / 1k characters -> $0.001 / 1k tokens
 | 
			
		||||
	// https://openai.com/pricing
 | 
			
		||||
	"gpt-4":                   15,
 | 
			
		||||
	"gpt-4-0314":              15,
 | 
			
		||||
	"gpt-4-0613":              15,
 | 
			
		||||
	"gpt-4-32k":               30,
 | 
			
		||||
	"gpt-4-32k-0314":          30,
 | 
			
		||||
	"gpt-4-32k-0613":          30,
 | 
			
		||||
	"gpt-4-1106-preview":      5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-0125-preview":      5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-turbo-preview":     5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-4-vision-preview":    5,    // $0.01 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo":           0.75, // $0.0015 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-0301":      0.75,
 | 
			
		||||
	"gpt-3.5-turbo-0613":      0.75,
 | 
			
		||||
	"gpt-3.5-turbo-16k":       1.5, // $0.003 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-16k-0613":  1.5,
 | 
			
		||||
	"gpt-3.5-turbo-instruct":  0.75, // $0.0015 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-1106":      0.5,  // $0.001 / 1K tokens
 | 
			
		||||
	"gpt-3.5-turbo-0125":      0.25, // $0.0005 / 1K tokens
 | 
			
		||||
	"davinci-002":             1,    // $0.002 / 1K tokens
 | 
			
		||||
	"babbage-002":             0.2,  // $0.0004 / 1K tokens
 | 
			
		||||
	"text-ada-001":            0.2,
 | 
			
		||||
	"text-babbage-001":        0.25,
 | 
			
		||||
	"text-curie-001":          1,
 | 
			
		||||
	"text-davinci-002":        10,
 | 
			
		||||
	"text-davinci-003":        10,
 | 
			
		||||
	"text-davinci-edit-001":   10,
 | 
			
		||||
	"code-davinci-edit-001":   10,
 | 
			
		||||
	"whisper-1":               15,  // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
 | 
			
		||||
	"tts-1":                   7.5, // $0.015 / 1K characters
 | 
			
		||||
	"tts-1-1106":              7.5,
 | 
			
		||||
	"tts-1-hd":                15, // $0.030 / 1K characters
 | 
			
		||||
	"tts-1-hd-1106":           15,
 | 
			
		||||
	"davinci":                 10,
 | 
			
		||||
	"curie":                   10,
 | 
			
		||||
	"babbage":                 10,
 | 
			
		||||
	"ada":                     10,
 | 
			
		||||
	"text-embedding-ada-002":  0.05,
 | 
			
		||||
	"text-embedding-3-small":  0.01,
 | 
			
		||||
	"text-embedding-3-large":  0.065,
 | 
			
		||||
	"text-search-ada-doc-001": 10,
 | 
			
		||||
	"text-moderation-stable":  0.1,
 | 
			
		||||
	"text-moderation-latest":  0.1,
 | 
			
		||||
	"dall-e-2":                8,  // $0.016 - $0.020 / image
 | 
			
		||||
	"dall-e-3":                20, // $0.040 - $0.120 / image
 | 
			
		||||
	// https://www.anthropic.com/api#pricing
 | 
			
		||||
	"claude-instant-1.2":       0.8 / 1000 * USD,
 | 
			
		||||
	"claude-2.0":               8.0 / 1000 * USD,
 | 
			
		||||
	"claude-2.1":               8.0 / 1000 * USD,
 | 
			
		||||
	"claude-3-haiku-20240307":  0.25 / 1000 * USD,
 | 
			
		||||
	"claude-3-sonnet-20240229": 3.0 / 1000 * USD,
 | 
			
		||||
	"claude-3-opus-20240229":   15.0 / 1000 * USD,
 | 
			
		||||
	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
 | 
			
		||||
	"ERNIE-Bot":         0.8572,     // ¥0.012 / 1k tokens
 | 
			
		||||
	"ERNIE-Bot-turbo":   0.5715,     // ¥0.008 / 1k tokens
 | 
			
		||||
	"ERNIE-Bot-4":       0.12 * RMB, // ¥0.12 / 1k tokens
 | 
			
		||||
	"ERNIE-Bot-8k":      0.024 * RMB,
 | 
			
		||||
	"Embedding-V1":      0.1429, // ¥0.002 / 1k tokens
 | 
			
		||||
	"bge-large-zh":      0.002 * RMB,
 | 
			
		||||
	"bge-large-en":      0.002 * RMB,
 | 
			
		||||
	"bge-large-8k":      0.002 * RMB,
 | 
			
		||||
	"PaLM-2":            1,
 | 
			
		||||
	"gemini-pro":        1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 | 
			
		||||
	"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 | 
			
		||||
	// https://open.bigmodel.cn/pricing
 | 
			
		||||
	"glm-4":                     0.1 * RMB,
 | 
			
		||||
	"glm-4v":                    0.1 * RMB,
 | 
			
		||||
	"glm-3-turbo":               0.005 * RMB,
 | 
			
		||||
	"chatglm_turbo":             0.3572, // ¥0.005 / 1k tokens
 | 
			
		||||
	"chatglm_pro":               0.7143, // ¥0.01 / 1k tokens
 | 
			
		||||
	"chatglm_std":               0.3572, // ¥0.005 / 1k tokens
 | 
			
		||||
@@ -103,11 +98,81 @@ var ModelRatio = map[string]float64{
 | 
			
		||||
	"qwen-max-longcontext":      1.4286, // ¥0.02 / 1k tokens
 | 
			
		||||
	"text-embedding-v1":         0.05,   // ¥0.0007 / 1k tokens
 | 
			
		||||
	"SparkDesk":                 1.2858, // ¥0.018 / 1k tokens
 | 
			
		||||
	"SparkDesk-v1.1":            1.2858, // ¥0.018 / 1k tokens
 | 
			
		||||
	"SparkDesk-v2.1":            1.2858, // ¥0.018 / 1k tokens
 | 
			
		||||
	"SparkDesk-v3.1":            1.2858, // ¥0.018 / 1k tokens
 | 
			
		||||
	"SparkDesk-v3.5":            1.2858, // ¥0.018 / 1k tokens
 | 
			
		||||
	"360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens
 | 
			
		||||
	"embedding-bert-512-v1":     0.0715, // ¥0.001 / 1k tokens
 | 
			
		||||
	"embedding_s1_v1":           0.0715, // ¥0.001 / 1k tokens
 | 
			
		||||
	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
 | 
			
		||||
	"hunyuan":                   7.143,  // ¥0.1 / 1k tokens  // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
 | 
			
		||||
	"ChatStd":                   0.01 * RMB,
 | 
			
		||||
	"ChatPro":                   0.1 * RMB,
 | 
			
		||||
	// https://platform.moonshot.cn/pricing
 | 
			
		||||
	"moonshot-v1-8k":   0.012 * RMB,
 | 
			
		||||
	"moonshot-v1-32k":  0.024 * RMB,
 | 
			
		||||
	"moonshot-v1-128k": 0.06 * RMB,
 | 
			
		||||
	// https://platform.baichuan-ai.com/price
 | 
			
		||||
	"Baichuan2-Turbo":      0.008 * RMB,
 | 
			
		||||
	"Baichuan2-Turbo-192k": 0.016 * RMB,
 | 
			
		||||
	"Baichuan2-53B":        0.02 * RMB,
 | 
			
		||||
	// https://api.minimax.chat/document/price
 | 
			
		||||
	"abab6-chat":    0.1 * RMB,
 | 
			
		||||
	"abab5.5-chat":  0.015 * RMB,
 | 
			
		||||
	"abab5.5s-chat": 0.005 * RMB,
 | 
			
		||||
	// https://docs.mistral.ai/platform/pricing/
 | 
			
		||||
	"open-mistral-7b":       0.25 / 1000 * USD,
 | 
			
		||||
	"open-mixtral-8x7b":     0.7 / 1000 * USD,
 | 
			
		||||
	"mistral-small-latest":  2.0 / 1000 * USD,
 | 
			
		||||
	"mistral-medium-latest": 2.7 / 1000 * USD,
 | 
			
		||||
	"mistral-large-latest":  8.0 / 1000 * USD,
 | 
			
		||||
	"mistral-embed":         0.1 / 1000 * USD,
 | 
			
		||||
	// https://wow.groq.com/
 | 
			
		||||
	"llama2-70b-4096":    0.7 / 1000 * USD,
 | 
			
		||||
	"llama2-7b-2048":     0.1 / 1000 * USD,
 | 
			
		||||
	"mixtral-8x7b-32768": 0.27 / 1000 * USD,
 | 
			
		||||
	"gemma-7b-it":        0.1 / 1000 * USD,
 | 
			
		||||
	// https://platform.lingyiwanwu.com/docs#-计费单元
 | 
			
		||||
	"yi-34b-chat-0205": 2.5 / 1000000 * RMB,
 | 
			
		||||
	"yi-34b-chat-200k": 12.0 / 1000000 * RMB,
 | 
			
		||||
	"yi-vl-plus":       6.0 / 1000000 * RMB,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var CompletionRatio = map[string]float64{}
 | 
			
		||||
 | 
			
		||||
var DefaultModelRatio map[string]float64
 | 
			
		||||
var DefaultCompletionRatio map[string]float64
 | 
			
		||||
 | 
			
		||||
func init() {
 | 
			
		||||
	DefaultModelRatio = make(map[string]float64)
	for k, v := range ModelRatio {
		DefaultModelRatio[k] = v
	}
	DefaultCompletionRatio = make(map[string]float64)
	for k, v := range CompletionRatio {
		DefaultCompletionRatio[k] = v
	}
}

func AddNewMissingRatio(oldRatio string) string {
	newRatio := make(map[string]float64)
	err := json.Unmarshal([]byte(oldRatio), &newRatio)
	if err != nil {
		logger.SysError("error unmarshalling old ratio: " + err.Error())
		return oldRatio
	}
	for k, v := range DefaultModelRatio {
		if _, ok := newRatio[k]; !ok {
			newRatio[k] = v
		}
	}
	jsonBytes, err := json.Marshal(newRatio)
	if err != nil {
		logger.SysError("error marshalling new ratio: " + err.Error())
		return oldRatio
	}
	return string(jsonBytes)
}

func ModelRatio2JSONString() string {
@@ -128,6 +193,9 @@ func GetModelRatio(name string) float64 {
		name = strings.TrimSuffix(name, "-internet")
	}
	ratio, ok := ModelRatio[name]
	if !ok {
		ratio, ok = DefaultModelRatio[name]
	}
	if !ok {
		logger.SysError("model ratio not found: " + name)
		return 30
@@ -135,8 +203,6 @@ func GetModelRatio(name string) float64 {
	return ratio
}

var CompletionRatio = map[string]float64{}

func CompletionRatio2JSONString() string {
	jsonBytes, err := json.Marshal(CompletionRatio)
	if err != nil {
@@ -154,6 +220,9 @@ func GetCompletionRatio(name string) float64 {
	if ratio, ok := CompletionRatio[name]; ok {
		return ratio
	}
	if ratio, ok := DefaultCompletionRatio[name]; ok {
		return ratio
	}
	if strings.HasPrefix(name, "gpt-3.5") {
		if strings.HasSuffix(name, "0125") {
			// https://openai.com/blog/new-embedding-models-and-api-updates
@@ -172,7 +241,7 @@ func GetCompletionRatio(name string) float64 {
				return 2
			}
		}
		return 1.333333
		return 4.0 / 3.0
	}
	if strings.HasPrefix(name, "gpt-4") {
		if strings.HasSuffix(name, "preview") {
@@ -180,11 +249,18 @@ func GetCompletionRatio(name string) float64 {
		}
		return 2
	}
	if strings.HasPrefix(name, "claude-instant-1") {
		return 3.38
	if strings.HasPrefix(name, "claude-3") {
		return 5
	}
	if strings.HasPrefix(name, "claude-2") {
		return 2.965517
	if strings.HasPrefix(name, "claude-") {
		return 3
	}
	if strings.HasPrefix(name, "mistral-") {
		return 3
	}
	switch name {
	case "llama2-70b-4096":
		return 0.8 / 0.7
	}
	return 1
}
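A rough sketch of how the two lookup functions above are typically combined when pricing a request (illustrative only; estimateQuota below is not part of this diff, and the quota units are assumed):

func estimateQuota(model string, promptTokens, completionTokens int) float64 {
	modelRatio := GetModelRatio(model)           // base price multiplier for the model
	completionRatio := GetCompletionRatio(model) // output tokens usually cost more than input
	// prompt tokens bill at the base ratio; completion tokens are scaled up first
	return (float64(promptTokens) + float64(completionTokens)*completionRatio) * modelRatio
}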
 
common/random.go (new file, +8 lines)
@@ -0,0 +1,8 @@
package common

import "math/rand"

// RandRange returns a random number between min and max (max is not included)
func RandRange(min, max int) int {
	return min + rand.Intn(max-min)
}
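Usage sketch for the new helper (call sites assumed, not shown in this diff); because the range is half-open, max itself is never returned:

idx := RandRange(0, 10)            // one of 0..9; 10 is excluded, so len(slice) is a safe max
jitterMs := 100 + RandRange(0, 50) // 100..149 ms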
@@ -3,7 +3,7 @@ package common
import (
	"context"
	"github.com/go-redis/redis/v8"
	"one-api/common/logger"
	"github.com/songquanpeng/one-api/common/logger"
	"os"
	"time"
)

@@ -2,10 +2,10 @@ package common

import (
	"fmt"
	"one-api/common/config"
	"github.com/songquanpeng/one-api/common/config"
)

func LogQuota(quota int) string {
func LogQuota(quota int64) string {
	if config.DisplayInCurrencyEnabled {
		return fmt.Sprintf("$%.6f 额度", float64(quota)/config.QuotaPerUnit)
	} else {

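A worked example of the branch above, with assumed values: if config.QuotaPerUnit is 500000, a quota of 250000 renders as "$0.500000 额度" ("额度" means quota). Widening the parameter to int64 matters because a lifetime quota counted in token-sized units can overflow 32 bits:

s := fmt.Sprintf("$%.6f", float64(250000)/500000.0) // "$0.500000"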
@@ -2,14 +2,14 @@ package controller

import (
	"github.com/gin-gonic/gin"
	"one-api/common/config"
	"one-api/model"
	"one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/model"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

func GetSubscription(c *gin.Context) {
	var remainQuota int
	var usedQuota int
	var remainQuota int64
	var usedQuota int64
	var err error
	var token *model.Token
	var expiredTime int64
@@ -22,13 +22,15 @@ func GetSubscription(c *gin.Context) {
	} else {
		userId := c.GetInt("id")
		remainQuota, err = model.GetUserQuota(userId)
		usedQuota, err = model.GetUserUsedQuota(userId)
		if err != nil {
			usedQuota, err = model.GetUserUsedQuota(userId)
		}
	}
	if expiredTime <= 0 {
		expiredTime = 0
	}
	if err != nil {
		Error := openai.Error{
		Error := relaymodel.Error{
			Message: err.Error(),
			Type:    "upstream_error",
		}
@@ -58,7 +60,7 @@ func GetSubscription(c *gin.Context) {
}

func GetUsage(c *gin.Context) {
	var quota int
	var quota int64
	var err error
	var token *model.Token
	if config.DisplayTokenStatEnabled {
@@ -70,7 +72,7 @@ func GetUsage(c *gin.Context) {
		quota, err = model.GetUserUsedQuota(userId)
	}
	if err != nil {
		Error := openai.Error{
		Error := relaymodel.Error{
			Message: err.Error(),
			Type:    "one_api_error",
		}

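For orientation (inferred from the handler names and error types; the routes themselves are not shown in this diff): these handlers serve OpenAI-style dashboard billing queries, and on failure they now emit the shared relaymodel.Error shape under an "error" key:

// illustrative failure payload
// {"error": {"message": "<err>", "type": "upstream_error"}}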
@@ -4,13 +4,14 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/model"
	"github.com/songquanpeng/one-api/monitor"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
	"one-api/common"
	"one-api/common/config"
	"one-api/common/logger"
	"one-api/model"
	"one-api/relay/util"
	"strconv"
	"time"

@@ -295,7 +296,7 @@ func UpdateChannelBalance(c *gin.Context) {
}

func updateAllChannelsBalance() error {
	channels, err := model.GetAllChannels(0, 0, true)
	channels, err := model.GetAllChannels(0, 0, "all")
	if err != nil {
		return err
	}
@@ -313,7 +314,7 @@ func updateAllChannelsBalance() error {
		} else {
			// err is nil & balance <= 0 means quota is used up
			if balance <= 0 {
				disableChannel(channel.Id, channel.Name, "余额不足")
				monitor.DisableChannel(channel.Id, channel.Name, "余额不足")
			}
		}
		time.Sleep(config.RequestInterval)
@@ -322,15 +323,14 @@ func updateAllChannelsBalance() error {
}

func UpdateAllChannelsBalance(c *gin.Context) {
	// TODO: make it async
	err := updateAllChannelsBalance()
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,
			"message": err.Error(),
		})
		return
	}
	//err := updateAllChannelsBalance()
	//if err != nil {
	//	c.JSON(http.StatusOK, gin.H{
	//		"success": false,
	//		"message": err.Error(),
	//	})
	//	return
	//}
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"message": "",

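The boolean third argument of model.GetAllChannels becomes a string scope in this commit; reading the call sites in this diff, the presumed contract is that "all" returns every channel while "limited" respects pagination:

channels, _ := model.GetAllChannels(0, 0, "all")         // full scan: balance refresh, channel tests
page, _ := model.GetAllChannels(p*size, size, "limited") // paginated listing (p and size are stand-ins)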
@@ -5,102 +5,36 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/common/message"
	"github.com/songquanpeng/one-api/middleware"
	"github.com/songquanpeng/one-api/model"
	"github.com/songquanpeng/one-api/monitor"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/helper"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
	"one-api/common"
	"one-api/common/config"
	"one-api/common/logger"
	"one-api/model"
	"one-api/relay/channel/openai"
	"one-api/relay/util"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
)

func testChannel(channel *model.Channel, request openai.ChatRequest) (err error, openaiErr *openai.Error) {
	switch channel.Type {
	case common.ChannelTypePaLM:
		fallthrough
	case common.ChannelTypeGemini:
		fallthrough
	case common.ChannelTypeAnthropic:
		fallthrough
	case common.ChannelTypeBaidu:
		fallthrough
	case common.ChannelTypeZhipu:
		fallthrough
	case common.ChannelTypeAli:
		fallthrough
	case common.ChannelType360:
		fallthrough
	case common.ChannelTypeXunfei:
		return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil
	case common.ChannelTypeAzure:
		request.Model = "gpt-35-turbo"
		defer func() {
			if err != nil {
				err = errors.New("请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!")
			}
		}()
	default:
		request.Model = "gpt-3.5-turbo"
func buildTestRequest() *relaymodel.GeneralOpenAIRequest {
	testRequest := &relaymodel.GeneralOpenAIRequest{
		MaxTokens: 2,
		Stream:    false,
		Model:     "gpt-3.5-turbo",
	}
	requestURL := common.ChannelBaseURLs[channel.Type]
	if channel.Type == common.ChannelTypeAzure {
		requestURL = util.GetFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
	} else {
		if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
			requestURL = baseURL
		}

		requestURL = util.GetFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
	}
	jsonData, err := json.Marshal(request)
	if err != nil {
		return err, nil
	}
	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(jsonData))
	if err != nil {
		return err, nil
	}
	if channel.Type == common.ChannelTypeAzure {
		req.Header.Set("api-key", channel.Key)
	} else {
		req.Header.Set("Authorization", "Bearer "+channel.Key)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := util.HTTPClient.Do(req)
	if err != nil {
		return err, nil
	}
	defer resp.Body.Close()
	var response openai.SlimTextResponse
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err, nil
	}
	err = json.Unmarshal(body, &response)
	if err != nil {
		return fmt.Errorf("Error: %s\nResp body: %s", err, body), nil
	}
	if response.Usage.CompletionTokens == 0 {
		if response.Error.Message == "" {
			response.Error.Message = "补全 tokens 非预期返回 0"
		}
		return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message)), &response.Error
	}
	return nil, nil
}

func buildTestRequest() *openai.ChatRequest {
	testRequest := &openai.ChatRequest{
		Model:     "", // this will be set later
		MaxTokens: 1,
	}
	testMessage := openai.Message{
	testMessage := relaymodel.Message{
		Role:    "user",
		Content: "hi",
	}
@@ -108,6 +42,72 @@ func buildTestRequest() *openai.ChatRequest {
	return testRequest
}

func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error) {
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)
	c.Request = &http.Request{
		Method: "POST",
		URL:    &url.URL{Path: "/v1/chat/completions"},
		Body:   nil,
		Header: make(http.Header),
	}
	c.Request.Header.Set("Authorization", "Bearer "+channel.Key)
	c.Request.Header.Set("Content-Type", "application/json")
	c.Set("channel", channel.Type)
	c.Set("base_url", channel.GetBaseURL())
	middleware.SetupContextForSelectedChannel(c, channel, "")
	meta := util.GetRelayMeta(c)
	apiType := constant.ChannelType2APIType(channel.Type)
	adaptor := helper.GetAdaptor(apiType)
	if adaptor == nil {
		return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
	}
	adaptor.Init(meta)
	modelName := adaptor.GetModelList()[0]
	if !strings.Contains(channel.Models, modelName) {
		modelNames := strings.Split(channel.Models, ",")
		if len(modelNames) > 0 {
			modelName = modelNames[0]
		}
	}
	request := buildTestRequest()
	request.Model = modelName
	meta.OriginModelName, meta.ActualModelName = modelName, modelName
	convertedRequest, err := adaptor.ConvertRequest(c, constant.RelayModeChatCompletions, request)
	if err != nil {
		return err, nil
	}
	jsonData, err := json.Marshal(convertedRequest)
	if err != nil {
		return err, nil
	}
	requestBody := bytes.NewBuffer(jsonData)
	c.Request.Body = io.NopCloser(requestBody)
	resp, err := adaptor.DoRequest(c, meta, requestBody)
	if err != nil {
		return err, nil
	}
	if resp.StatusCode != http.StatusOK {
		err := util.RelayErrorHandler(resp)
		return fmt.Errorf("status code %d: %s", resp.StatusCode, err.Error.Message), &err.Error
	}
	usage, respErr := adaptor.DoResponse(c, resp, meta)
	if respErr != nil {
		return fmt.Errorf("%s", respErr.Error.Message), &respErr.Error
	}
	if usage == nil {
		return errors.New("usage is nil"), nil
	}
	result := w.Result()
	// print result.Body
	respBody, err := io.ReadAll(result.Body)
	if err != nil {
		return err, nil
	}
	logger.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
	return nil, nil
}

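A minimal sketch of the in-process pattern the new testChannel is built on (standard library plus gin; the snippet is illustrative, not from the repo): httptest.NewRecorder captures whatever a handler writes, so a channel can be probed through the normal relay path without opening a socket:

import (
	"net/http/httptest"

	"github.com/gin-gonic/gin"
)

func recorderSketch() string {
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w) // a real *gin.Context bound to the recorder
	c.JSON(200, gin.H{"ok": true})   // anything written via c lands in w
	return w.Body.String()           // the response, captured entirely in memory
}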
func TestChannel(c *gin.Context) {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
@@ -125,9 +125,8 @@ func TestChannel(c *gin.Context) {
		})
		return
	}
	testRequest := buildTestRequest()
	tik := time.Now()
	err, _ = testChannel(channel, *testRequest)
	err, _ = testChannel(channel)
	tok := time.Now()
	milliseconds := tok.Sub(tik).Milliseconds()
	go channel.UpdateResponseTime(milliseconds)
@@ -151,33 +150,7 @@ func TestChannel(c *gin.Context) {
var testAllChannelsLock sync.Mutex
var testAllChannelsRunning bool = false

func notifyRootUser(subject string, content string) {
	if config.RootUserEmail == "" {
		config.RootUserEmail = model.GetRootUserEmail()
	}
	err := common.SendEmail(subject, config.RootUserEmail, content)
	if err != nil {
		logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
	}
}

// disable & notify
func disableChannel(channelId int, channelName string, reason string) {
	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
	subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
	content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
	notifyRootUser(subject, content)
}

// enable & notify
func enableChannel(channelId int, channelName string) {
	model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled)
	subject := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
	content := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
	notifyRootUser(subject, content)
}

func testAllChannels(notify bool) error {
func testChannels(notify bool, scope string) error {
	if config.RootUserEmail == "" {
		config.RootUserEmail = model.GetRootUserEmail()
	}
@@ -188,11 +161,10 @@ func testAllChannels(notify bool) error {
	}
	testAllChannelsRunning = true
	testAllChannelsLock.Unlock()
	channels, err := model.GetAllChannels(0, 0, true)
	channels, err := model.GetAllChannels(0, 0, scope)
	if err != nil {
		return err
	}
	testRequest := buildTestRequest()
	var disableThreshold = int64(config.ChannelDisableThreshold * 1000)
	if disableThreshold == 0 {
		disableThreshold = 10000000 // a impossible value
@@ -201,18 +173,22 @@ func testAllChannels(notify bool) error {
		for _, channel := range channels {
			isChannelEnabled := channel.Status == common.ChannelStatusEnabled
			tik := time.Now()
			err, openaiErr := testChannel(channel, *testRequest)
			err, openaiErr := testChannel(channel)
			tok := time.Now()
			milliseconds := tok.Sub(tik).Milliseconds()
			if isChannelEnabled && milliseconds > disableThreshold {
				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
				disableChannel(channel.Id, channel.Name, err.Error())
				if config.AutomaticDisableChannelEnabled {
					monitor.DisableChannel(channel.Id, channel.Name, err.Error())
				} else {
					_ = message.Notify(message.ByAll, fmt.Sprintf("渠道 %s (%d)测试超时", channel.Name, channel.Id), "", err.Error())
				}
			}
			if isChannelEnabled && util.ShouldDisableChannel(openaiErr, -1) {
				disableChannel(channel.Id, channel.Name, err.Error())
				monitor.DisableChannel(channel.Id, channel.Name, err.Error())
			}
			if !isChannelEnabled && util.ShouldEnableChannel(err, openaiErr) {
				enableChannel(channel.Id, channel.Name)
				monitor.EnableChannel(channel.Id, channel.Name)
			}
			channel.UpdateResponseTime(milliseconds)
			time.Sleep(config.RequestInterval)
@@ -221,7 +197,7 @@ func testAllChannels(notify bool) error {
		testAllChannelsRunning = false
		testAllChannelsLock.Unlock()
		if notify {
			err := common.SendEmail("通道测试完成", config.RootUserEmail, "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
			err := message.Notify(message.ByAll, "通道测试完成", "", "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
			if err != nil {
				logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
			}
@@ -230,8 +206,12 @@ func testAllChannels(notify bool) error {
	return nil
}

func TestAllChannels(c *gin.Context) {
	err := testAllChannels(true)
func TestChannels(c *gin.Context) {
	scope := c.Query("scope")
	if scope == "" {
		scope = "all"
	}
	err := testChannels(true, scope)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,
@@ -250,7 +230,7 @@ func AutomaticallyTestChannels(frequency int) {
	for {
		time.Sleep(time.Duration(frequency) * time.Minute)
		logger.SysLog("testing all channels")
		_ = testAllChannels(false)
		_ = testChannels(false, "all")
		logger.SysLog("channel test finished")
	}
}

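Net effect of the renames above, as read from this diff: a scope query parameter now selects which channels get probed (defaulting to "all"), and the local email helpers (notifyRootUser, disableChannel, enableChannel) give way to the monitor and message packages. An illustrative call, with the route path assumed since only the handler appears here:

// GET /api/channel/test?scope=all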
@@ -2,10 +2,10 @@ package controller

import (
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common/config"
	"one-api/common/helper"
	"one-api/model"
	"strconv"
	"strings"
)
@@ -15,7 +15,7 @@ func GetAllChannels(c *gin.Context) {
	if p < 0 {
		p = 0
	}
	channels, err := model.GetAllChannels(p*config.ItemsPerPage, config.ItemsPerPage, false)
	channels, err := model.GetAllChannels(p*config.ItemsPerPage, config.ItemsPerPage, "limited")
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,

@@ -7,12 +7,12 @@ import (
	"fmt"
	"github.com/gin-contrib/sessions"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common"
	"one-api/common/config"
	"one-api/common/helper"
	"one-api/common/logger"
	"one-api/model"
	"strconv"
	"time"
)

@@ -2,13 +2,13 @@ package controller

import (
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"net/http"
	"one-api/common"
)

func GetGroups(c *gin.Context) {
	groupNames := make([]string, 0)
	for groupName, _ := range common.GroupRatio {
	for groupName := range common.GroupRatio {
		groupNames = append(groupNames, groupName)
	}
	c.JSON(http.StatusOK, gin.H{

@@ -2,9 +2,9 @@ package controller

import (
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common/config"
	"one-api/model"
	"strconv"
)

@@ -3,10 +3,11 @@ package controller
import (
	"encoding/json"
	"fmt"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/message"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common"
	"one-api/common/config"
	"one-api/model"
	"strings"

	"github.com/gin-gonic/gin"
@@ -110,7 +111,7 @@ func SendEmailVerification(c *gin.Context) {
	content := fmt.Sprintf("<p>您好,你正在进行%s邮箱验证。</p>"+
		"<p>您的验证码为: <strong>%s</strong></p>"+
		"<p>验证码 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, code, common.VerificationValidMinutes)
	err := common.SendEmail(subject, email, content)
	err := message.SendEmail(subject, email, content)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,
@@ -149,7 +150,7 @@ func SendPasswordResetEmail(c *gin.Context) {
		"<p>点击 <a href='%s'>此处</a> 进行密码重置。</p>"+
		"<p>如果链接无法点击,请尝试点击下面的链接或将其复制到浏览器中打开:<br> %s </p>"+
		"<p>重置链接 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, link, link, common.VerificationValidMinutes)
	err := common.SendEmail(subject, email, content)
	err := message.SendEmail(subject, email, content)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,

@@ -3,7 +3,13 @@ package controller
import (
	"fmt"
	"github.com/gin-gonic/gin"
	"one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/helper"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
	"net/http"
)

// https://platform.openai.com/docs/api-reference/models/list
@@ -35,6 +41,7 @@ type OpenAIModels struct {

var openAIModels []OpenAIModels
var openAIModelsMap map[string]OpenAIModels
var channelId2Models map[int][]string

func init() {
	var permission []OpenAIModelPermission
@@ -53,597 +60,63 @@ func init() {
		IsBlocking:         false,
	})
	// https://platform.openai.com/docs/models/model-endpoint-compatibility
	openAIModels = []OpenAIModels{
	// The 65 hard-coded entries deleted here all set Object: "model", the
	// shared permission slice built above, Root equal to their Id, and
	// Parent: nil. Created was 1677649963, except 1699593571 for
	// gpt-3.5-turbo-1106, gpt-4-1106-preview and gpt-4-vision-preview, and
	// 1706232090 for gpt-3.5-turbo-0125, gpt-4-0125-preview,
	// gpt-4-turbo-preview, text-embedding-3-small and text-embedding-3-large.
	// The deleted Ids, grouped by OwnedBy:
	// openai: dall-e-2, dall-e-3, whisper-1, tts-1, tts-1-1106, tts-1-hd,
	//   tts-1-hd-1106, gpt-3.5-turbo, gpt-3.5-turbo-0301, gpt-3.5-turbo-0613,
	//   gpt-3.5-turbo-16k, gpt-3.5-turbo-16k-0613, gpt-3.5-turbo-1106,
	//   gpt-3.5-turbo-0125, gpt-3.5-turbo-instruct, gpt-4, gpt-4-0314,
	//   gpt-4-0613, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-0613,
	//   gpt-4-1106-preview, gpt-4-0125-preview, gpt-4-turbo-preview,
	//   gpt-4-vision-preview, text-embedding-ada-002, text-embedding-3-small,
	//   text-embedding-3-large, text-davinci-003, text-davinci-002,
	//   text-curie-001, text-babbage-001, text-ada-001, text-moderation-latest,
	//   text-moderation-stable, text-davinci-edit-001, code-davinci-edit-001,
	//   davinci-002, babbage-002
	// anthropic: claude-instant-1, claude-2, claude-2.1, claude-2.0
	// baidu: ERNIE-Bot, ERNIE-Bot-turbo, ERNIE-Bot-4, Embedding-V1
	// google palm: PaLM-2
	// google gemini: gemini-pro, gemini-pro-vision
	// zhipu: chatglm_turbo, chatglm_pro, chatglm_std, chatglm_lite
	// ali: qwen-turbo, qwen-plus, qwen-max, qwen-max-longcontext, text-embedding-v1
	// xunfei: SparkDesk
	// 360: 360GPT_S2_V9, embedding-bert-512-v1, embedding_s1_v1, semantic_similarity_s1_v1
	// tencent: hunyuan
	for i := 0; i < constant.APITypeDummy; i++ {
		if i == constant.APITypeAIProxyLibrary {
			continue
		}
		adaptor := helper.GetAdaptor(i)
		channelName := adaptor.GetChannelName()
		modelNames := adaptor.GetModelList()
		for _, modelName := range modelNames {
			openAIModels = append(openAIModels, OpenAIModels{
				Id:         modelName,
				Object:     "model",
				Created:    1626777600,
				OwnedBy:    channelName,
				Permission: permission,
				Root:       modelName,
				Parent:     nil,
			})
		}
	}
	for _, channelType := range openai.CompatibleChannels {
		if channelType == common.ChannelTypeAzure {
			continue
		}
		channelName, channelModelList := openai.GetCompatibleChannelMeta(channelType)
		for _, modelName := range channelModelList {
			openAIModels = append(openAIModels, OpenAIModels{
				Id:         modelName,
				Object:     "model",
				Created:    1626777600,
				OwnedBy:    channelName,
				Permission: permission,
				Root:       modelName,
				Parent:     nil,
			})
		}
	}
	openAIModelsMap = make(map[string]OpenAIModels)
	for _, model := range openAIModels {
		openAIModelsMap[model.Id] = model
	}
	channelId2Models = make(map[int][]string)
	for i := 1; i < common.ChannelTypeDummy; i++ {
		adaptor := helper.GetAdaptor(constant.ChannelType2APIType(i))
		meta := &util.RelayMeta{
			ChannelType: i,
		}
		adaptor.Init(meta)
		channelId2Models[i] = adaptor.GetModelList()
	}
}

func DashboardListModels(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"message": "",
		"data":    channelId2Models,
	})
}

func ListModels(c *gin.Context) {
@@ -658,7 +131,7 @@ func RetrieveModel(c *gin.Context) {
	if model, ok := openAIModelsMap[modelId]; ok {
		c.JSON(200, model)
	} else {
		Error := openai.Error{
		Error := relaymodel.Error{
			Message: fmt.Sprintf("The model '%s' does not exist", modelId),
			Type:    "invalid_request_error",
			Param:   "model",

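Shape of what init now produces, inferred from the loops above (the ids are illustrative): channelId2Models keys each channel type to the model list its adaptor advertises, and DashboardListModels returns that map verbatim:

// channelId2Models[1] might be ["gpt-3.5-turbo", "gpt-4", ...]
// response: {"success": true, "message": "", "data": {"1": [...], "2": [...]}}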
@@ -2,10 +2,10 @@ package controller

import (
	"encoding/json"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common/config"
	"one-api/common/helper"
	"one-api/model"
	"strings"

	"github.com/gin-gonic/gin"

@@ -2,10 +2,10 @@ package controller

import (
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/model"
	"net/http"
	"one-api/common/config"
	"one-api/common/helper"
	"one-api/model"
	"strconv"
)

@@ -1,24 +1,29 @@
package controller

import (
	"bytes"
	"context"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/middleware"
	dbmodel "github.com/songquanpeng/one-api/model"
	"github.com/songquanpeng/one-api/monitor"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/controller"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
	"one-api/common/config"
	"one-api/common/helper"
	"one-api/common/logger"
	"one-api/relay/channel/openai"
	"one-api/relay/constant"
	"one-api/relay/controller"
	"one-api/relay/util"
	"strconv"
)

// https://platform.openai.com/docs/api-reference/chat

func Relay(c *gin.Context) {
	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
	var err *openai.ErrorWithStatusCode
func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
	var err *model.ErrorWithStatusCode
	switch relayMode {
	case constant.RelayModeImagesGenerations:
		err = controller.RelayImageHelper(c, relayMode)
@@ -29,39 +34,99 @@ func Relay(c *gin.Context) {
	case constant.RelayModeAudioTranscription:
		err = controller.RelayAudioHelper(c, relayMode)
	default:
		err = controller.RelayTextHelper(c, relayMode)
		err = controller.RelayTextHelper(c)
	}
	if err != nil {
		requestId := c.GetString(logger.RequestIdKey)
		retryTimesStr := c.Query("retry")
		retryTimes, _ := strconv.Atoi(retryTimesStr)
		if retryTimesStr == "" {
			retryTimes = config.RetryTimes
	return err
}

func Relay(c *gin.Context) {
	ctx := c.Request.Context()
	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
	if config.DebugEnabled {
		requestBody, _ := common.GetRequestBody(c)
		logger.Debugf(ctx, "request body: %s", string(requestBody))
	}
	channelId := c.GetInt("channel_id")
	bizErr := relay(c, relayMode)
	if bizErr == nil {
		monitor.Emit(channelId, true)
		return
	}
	lastFailedChannelId := channelId
	channelName := c.GetString("channel_name")
	group := c.GetString("group")
	originalModel := c.GetString("original_model")
	go processChannelRelayError(ctx, channelId, channelName, bizErr)
	requestId := c.GetString(logger.RequestIdKey)
	retryTimes := config.RetryTimes
	if !shouldRetry(c, bizErr.StatusCode) {
		logger.Errorf(ctx, "relay error happen, status code is %d, won't retry in this case", bizErr.StatusCode)
		retryTimes = 0
	}
	for i := retryTimes; i > 0; i-- {
		channel, err := dbmodel.CacheGetRandomSatisfiedChannel(group, originalModel, i != retryTimes)
		if err != nil {
			logger.Errorf(ctx, "CacheGetRandomSatisfiedChannel failed: %w", err)
			break
		}
		if retryTimes > 0 {
			c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
		} else {
			if err.StatusCode == http.StatusTooManyRequests {
				err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
			}
			err.Error.Message = helper.MessageWithRequestId(err.Error.Message, requestId)
			c.JSON(err.StatusCode, gin.H{
				"error": err.Error,
			})
		logger.Infof(ctx, "using channel #%d to retry (remain times %d)", channel.Id, i)
		if channel.Id == lastFailedChannelId {
			continue
		}
		middleware.SetupContextForSelectedChannel(c, channel, originalModel)
		requestBody, err := common.GetRequestBody(c)
		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
		bizErr = relay(c, relayMode)
		if bizErr == nil {
			return
		}
		channelId := c.GetInt("channel_id")
		logger.Error(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
		// https://platform.openai.com/docs/guides/error-codes/api-errors
		if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
			channelId := c.GetInt("channel_id")
			channelName := c.GetString("channel_name")
			disableChannel(channelId, channelName, err.Message)
		lastFailedChannelId = channelId
		channelName := c.GetString("channel_name")
		go processChannelRelayError(ctx, channelId, channelName, bizErr)
	}
	if bizErr != nil {
		if bizErr.StatusCode == http.StatusTooManyRequests {
			bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
		}
		bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
		c.JSON(bizErr.StatusCode, gin.H{
			"error": bizErr.Error,
		})
	}
}

+func shouldRetry(c *gin.Context, statusCode int) bool {
+	if _, ok := c.Get("specific_channel_id"); ok {
+		return false
+	}
+	if statusCode == http.StatusTooManyRequests {
+		return true
+	}
+	if statusCode/100 == 5 {
+		return true
+	}
+	if statusCode == http.StatusBadRequest {
+		return false
+	}
+	if statusCode/100 == 2 {
+		return false
+	}
+	return true
+}
+
+func processChannelRelayError(ctx context.Context, channelId int, channelName string, err *model.ErrorWithStatusCode) {
+	logger.Errorf(ctx, "relay error (channel #%d): %s", channelId, err.Message)
+	// https://platform.openai.com/docs/guides/error-codes/api-errors
+	if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
+		monitor.DisableChannel(channelId, channelName, err.Message)
+	} else {
+		monitor.Emit(channelId, false)
+	}
+}
 
 func RelayNotImplemented(c *gin.Context) {
-	err := openai.Error{
+	err := model.Error{
 		Message: "API not implemented",
 		Type:    "one_api_error",
 		Param:   "",
@@ -73,7 +138,7 @@ func RelayNotImplemented(c *gin.Context) {
 }
 
 func RelayNotFound(c *gin.Context) {
-	err := openai.Error{
+	err := model.Error{
 		Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
 		Type:    "invalid_request_error",
 		Param:   "",
 
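The retry path above hinges on shouldRetry: a request pinned to a specific channel (specific_channel_id) never retries; otherwise 429 and 5xx responses are retried on another channel, while 400 and 2xx are final, and anything else defaults to retrying. A minimal standalone sketch of that decision table (the gin-context check is omitted; this is an illustration, not part of the diff):

```go
package main

import (
	"fmt"
	"net/http"
)

// shouldRetry mirrors the status-code policy in controller/relay.go:
// retry on saturation (429) and upstream failure (5xx); give up on
// client errors (400) and successes (2xx); default to retrying.
func shouldRetry(statusCode int) bool {
	switch {
	case statusCode == http.StatusTooManyRequests:
		return true
	case statusCode/100 == 5:
		return true
	case statusCode == http.StatusBadRequest:
		return false
	case statusCode/100 == 2:
		return false
	default:
		return true
	}
}

func main() {
	for _, code := range []int{429, 502, 400, 200, 401} {
		fmt.Printf("status %d -> retry=%v\n", code, shouldRetry(code))
	}
}
```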
@@ -2,11 +2,11 @@ package controller
 
 import (
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/model"
 	"strconv"
 )
 
@@ -3,11 +3,11 @@ package controller
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/model"
 	"strconv"
 	"time"
 
@@ -5,10 +5,10 @@ import (
 	"errors"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/model"
 	"strconv"
 	"time"
 )
 
go.mod (2 changes)
@@ -1,4 +1,4 @@
-module one-api
+module github.com/songquanpeng/one-api
 
 // +heroku goVersion go1.18
 go 1.18
 
@@ -456,6 +456,7 @@
   "已绑定的邮箱账户": "Email Account Bound",
   "用户信息更新成功!": "User information updated successfully!",
-  "模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f",
+  "模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f": "model rate %.2f, group rate %.2f, completion rate %.2f",
+  "使用明细(总消耗额度:{renderQuota(stat.quota)})": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
   "用户名称": "User Name",
   "令牌名称": "Token Name",
 
main.go (26 changes)
@@ -6,14 +6,14 @@ import (
 	"github.com/gin-contrib/sessions"
 	"github.com/gin-contrib/sessions/cookie"
 	"github.com/gin-gonic/gin"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/logger"
-	"one-api/controller"
-	"one-api/middleware"
-	"one-api/model"
-	"one-api/relay/channel/openai"
-	"one-api/router"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/controller"
+	"github.com/songquanpeng/one-api/middleware"
+	"github.com/songquanpeng/one-api/model"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/router"
 	"os"
 	"strconv"
 )
@@ -64,13 +64,6 @@ func main() {
 		go model.SyncOptions(config.SyncFrequency)
 		go model.SyncChannelCache(config.SyncFrequency)
 	}
-	if os.Getenv("CHANNEL_UPDATE_FREQUENCY") != "" {
-		frequency, err := strconv.Atoi(os.Getenv("CHANNEL_UPDATE_FREQUENCY"))
-		if err != nil {
-			logger.FatalLog("failed to parse CHANNEL_UPDATE_FREQUENCY: " + err.Error())
-		}
-		go controller.AutomaticallyUpdateChannels(frequency)
-	}
 	if os.Getenv("CHANNEL_TEST_FREQUENCY") != "" {
 		frequency, err := strconv.Atoi(os.Getenv("CHANNEL_TEST_FREQUENCY"))
 		if err != nil {
@@ -83,6 +76,9 @@ func main() {
 		logger.SysLog("batch update enabled with interval " + strconv.Itoa(config.BatchUpdateInterval) + "s")
 		model.InitBatchUpdater()
 	}
+	if config.EnableMetric {
+		logger.SysLog("metric enabled, will disable channel if too much request failed")
+	}
 	openai.InitTokenEncoders()
 
 	// Initialize HTTP server
 
@@ -3,9 +3,10 @@ package middleware
 import (
 	"github.com/gin-contrib/sessions"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/blacklist"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/model"
 	"strings"
 )
 
@@ -42,11 +43,14 @@ func authHelper(c *gin.Context, minRole int) {
 			return
 		}
 	}
-	if status.(int) == common.UserStatusDisabled {
+	if status.(int) == common.UserStatusDisabled || blacklist.IsUserBanned(id.(int)) {
 		c.JSON(http.StatusOK, gin.H{
 			"success": false,
 			"message": "用户已被封禁",
 		})
+		session := sessions.Default(c)
+		session.Clear()
+		_ = session.Save()
 		c.Abort()
 		return
 	}
@@ -99,7 +103,7 @@ func TokenAuth() func(c *gin.Context) {
 			abortWithMessage(c, http.StatusInternalServerError, err.Error())
 			return
 		}
-		if !userEnabled {
+		if !userEnabled || blacklist.IsUserBanned(token.UserId) {
 			abortWithMessage(c, http.StatusForbidden, "用户已被封禁")
 			return
 		}
@@ -108,7 +112,7 @@ func TokenAuth() func(c *gin.Context) {
 		c.Set("token_name", token.Name)
 		if len(parts) > 1 {
 			if model.IsAdmin(token.UserId) {
-				c.Set("channelId", parts[1])
+				c.Set("specific_channel_id", parts[1])
 			} else {
 				abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道")
 				return
 
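Both authHelper and TokenAuth now consult blacklist.IsUserBanned, and model/user.go (further down) calls blacklist.BanUser/UnbanUser on status changes. The package itself is not shown in this compare view; a minimal sketch of that surface, assuming a process-local concurrent set (illustrative only, the real common/blacklist may differ):

```go
package blacklist

import "sync"

// banned is a process-local set of banned user ids; sync.Map keeps the
// lookups on the hot auth path safe without an explicit mutex.
var banned sync.Map

func BanUser(id int)   { banned.Store(id, struct{}{}) }
func UnbanUser(id int) { banned.Delete(id) }

func IsUserBanned(id int) bool {
	_, ok := banned.Load(id)
	return ok
}
```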
@@ -2,10 +2,10 @@ package middleware
 
 import (
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/common/logger"
-	"one-api/model"
 	"strconv"
 	"strings"
 
@@ -21,8 +21,9 @@ func Distribute() func(c *gin.Context) {
 		userId := c.GetInt("id")
 		userGroup, _ := model.CacheGetUserGroup(userId)
 		c.Set("group", userGroup)
+		var requestModel string
 		var channel *model.Channel
-		channelId, ok := c.Get("channelId")
+		channelId, ok := c.Get("specific_channel_id")
 		if ok {
 			id, err := strconv.Atoi(channelId.(string))
 			if err != nil {
@@ -66,7 +67,8 @@ func Distribute() func(c *gin.Context) {
 					modelRequest.Model = "whisper-1"
 				}
 			}
-			channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model)
+			requestModel = modelRequest.Model
+			channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model, false)
 			if err != nil {
 				message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)
 				if channel != nil {
@@ -77,24 +79,34 @@ func Distribute() func(c *gin.Context) {
 				return
 			}
 		}
-		c.Set("channel", channel.Type)
-		c.Set("channel_id", channel.Id)
-		c.Set("channel_name", channel.Name)
-		c.Set("model_mapping", channel.GetModelMapping())
-		c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
-		c.Set("base_url", channel.GetBaseURL())
-		switch channel.Type {
-		case common.ChannelTypeAzure:
-			c.Set("api_version", channel.Other)
-		case common.ChannelTypeXunfei:
-			c.Set("api_version", channel.Other)
-		case common.ChannelTypeGemini:
-			c.Set("api_version", channel.Other)
-		case common.ChannelTypeAIProxyLibrary:
-			c.Set("library_id", channel.Other)
-		case common.ChannelTypeAli:
-			c.Set("plugin", channel.Other)
-		}
+		SetupContextForSelectedChannel(c, channel, requestModel)
 		c.Next()
 	}
 }
 
+func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
+	c.Set("channel", channel.Type)
+	c.Set("channel_id", channel.Id)
+	c.Set("channel_name", channel.Name)
+	c.Set("model_mapping", channel.GetModelMapping())
+	c.Set("original_model", modelName) // for retry
+	c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
+	c.Set("base_url", channel.GetBaseURL())
+	// this is for backward compatibility
+	switch channel.Type {
+	case common.ChannelTypeAzure:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeXunfei:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeGemini:
+		c.Set(common.ConfigKeyAPIVersion, channel.Other)
+	case common.ChannelTypeAIProxyLibrary:
+		c.Set(common.ConfigKeyLibraryID, channel.Other)
+	case common.ChannelTypeAli:
+		c.Set(common.ConfigKeyPlugin, channel.Other)
+	}
+	cfg, _ := channel.LoadConfig()
+	for k, v := range cfg {
+		c.Set(common.ConfigKeyPrefix+k, v)
+	}
+}
 
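SetupContextForSelectedChannel is what makes the new retry loop possible: channel selection now writes a uniform set of gin-context keys, so a retry can re-run it with a different channel and the relay handlers see fresh values. A hypothetical reader-side helper (names assumed for illustration, not from the diff):

```go
package middleware

import "github.com/gin-gonic/gin"

// channelMeta sketches how downstream relay code can read the keys that
// SetupContextForSelectedChannel stores on the gin context.
func channelMeta(c *gin.Context) (id int, name, baseURL, originalModel string) {
	id = c.GetInt("channel_id")
	name = c.GetString("channel_name")
	baseURL = c.GetString("base_url")
	originalModel = c.GetString("original_model") // saved so retries can re-resolve a channel
	return
}
```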
@@ -3,7 +3,7 @@ package middleware
 import (
 	"fmt"
 	"github.com/gin-gonic/gin"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/logger"
 )
 
 func SetUpLogger(server *gin.Engine) {
 
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
 	"net/http"
-	"one-api/common"
-	"one-api/common/config"
 	"time"
 )
 
@@ -3,8 +3,9 @@ package middleware
 import (
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/logger"
 	"net/http"
-	"one-api/common/logger"
 	"runtime/debug"
 )
 
@@ -12,11 +13,15 @@ func RelayPanicRecover() gin.HandlerFunc {
 	return func(c *gin.Context) {
 		defer func() {
 			if err := recover(); err != nil {
-				logger.SysError(fmt.Sprintf("panic detected: %v", err))
-				logger.SysError(fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
+				ctx := c.Request.Context()
+				logger.Errorf(ctx, fmt.Sprintf("panic detected: %v", err))
+				logger.Errorf(ctx, fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
+				logger.Errorf(ctx, fmt.Sprintf("request: %s %s", c.Request.Method, c.Request.URL.Path))
+				body, _ := common.GetRequestBody(c)
+				logger.Errorf(ctx, fmt.Sprintf("request body: %s", string(body)))
 				c.JSON(http.StatusInternalServerError, gin.H{
 					"error": gin.H{
-						"message": fmt.Sprintf("Panic detected, error: %v. Please submit a issue here: https://github.com/songquanpeng/one-api", err),
+						"message": fmt.Sprintf("Panic detected, error: %v. Please submit an issue with the related log here: https://github.com/songquanpeng/one-api", err),
 						"type":    "one_api_panic",
 					},
 				})
 
@@ -3,13 +3,13 @@ package middleware
 import (
 	"context"
 	"github.com/gin-gonic/gin"
-	"one-api/common/helper"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 )
 
 func RequestId() func(c *gin.Context) {
 	return func(c *gin.Context) {
-		id := helper.GetTimeString() + helper.GetRandomString(8)
+		id := helper.GenRequestID()
 		c.Set(logger.RequestIdKey, id)
 		ctx := context.WithValue(c.Request.Context(), logger.RequestIdKey, id)
 		c.Request = c.Request.WithContext(ctx)
 
@@ -4,10 +4,10 @@ import (
 	"encoding/json"
 	"github.com/gin-contrib/sessions"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"net/http"
 	"net/url"
-	"one-api/common/config"
-	"one-api/common/logger"
 )
 
 type turnstileCheckResponse struct {
 
@@ -2,8 +2,8 @@ package middleware
 
 import (
 	"github.com/gin-gonic/gin"
-	"one-api/common/helper"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 )
 
 func abortWithMessage(c *gin.Context, statusCode int, message string) {
 
@@ -1,7 +1,7 @@
 package model
 
 import (
-	"one-api/common"
+	"github.com/songquanpeng/one-api/common"
 	"strings"
 )
 
@@ -1,13 +1,14 @@
 package model
 
 import (
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"math/rand"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/logger"
 	"sort"
 	"strconv"
 	"strings"
@@ -70,31 +71,42 @@ func CacheGetUserGroup(id int) (group string, err error) {
 	return group, err
 }
 
-func CacheGetUserQuota(id int) (quota int, err error) {
+func fetchAndUpdateUserQuota(ctx context.Context, id int) (quota int64, err error) {
+	quota, err = GetUserQuota(id)
+	if err != nil {
+		return 0, err
+	}
+	err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
+	if err != nil {
+		logger.Error(ctx, "Redis set user quota error: "+err.Error())
+	}
+	return
+}
+
+func CacheGetUserQuota(ctx context.Context, id int) (quota int64, err error) {
 	if !common.RedisEnabled {
 		return GetUserQuota(id)
 	}
 	quotaString, err := common.RedisGet(fmt.Sprintf("user_quota:%d", id))
 	if err != nil {
-		quota, err = GetUserQuota(id)
-		if err != nil {
-			return 0, err
-		}
-		err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
-		if err != nil {
-			logger.SysError("Redis set user quota error: " + err.Error())
-		}
-		return quota, err
+		return fetchAndUpdateUserQuota(ctx, id)
 	}
-	quota, err = strconv.Atoi(quotaString)
-	return quota, err
+	quota, err = strconv.ParseInt(quotaString, 10, 64)
+	if err != nil {
+		return 0, nil
+	}
+	if quota <= config.PreConsumedQuota { // when user's quota is less than pre-consumed quota, we need to fetch from db
+		logger.Infof(ctx, "user %d's cached quota is too low: %d, refreshing from db", quota, id)
+		return fetchAndUpdateUserQuota(ctx, id)
+	}
+	return quota, nil
 }
 
-func CacheUpdateUserQuota(id int) error {
+func CacheUpdateUserQuota(ctx context.Context, id int) error {
 	if !common.RedisEnabled {
 		return nil
 	}
-	quota, err := GetUserQuota(id)
+	quota, err := CacheGetUserQuota(ctx, id)
 	if err != nil {
 		return err
 	}
@@ -102,7 +114,7 @@ func CacheUpdateUserQuota(id int) error {
 	return err
 }
 
-func CacheDecreaseUserQuota(id int, quota int) error {
+func CacheDecreaseUserQuota(id int, quota int64) error {
 	if !common.RedisEnabled {
 		return nil
 	}
@@ -191,7 +203,7 @@ func SyncChannelCache(frequency int) {
 	}
 }
 
-func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
+func CacheGetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority bool) (*Channel, error) {
 	if !config.MemoryCacheEnabled {
 		return GetRandomSatisfiedChannel(group, model)
 	}
@@ -213,5 +225,10 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
 		}
 	}
 	idx := rand.Intn(endIdx)
+	if ignoreFirstPriority {
+		if endIdx < len(channels) { // which means there are more than one priority
+			idx = common.RandRange(endIdx, len(channels))
+		}
+	}
 	return channels[idx], nil
 }
 
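CacheGetRandomSatisfiedChannel sorts the cached channels by priority, with endIdx marking the end of the top-priority run. The new ignoreFirstPriority flag (passed as i != retryTimes from the retry loop, i.e. on every attempt after the first) pushes selection into the lower-priority channels when any exist. A self-contained sketch of just the index selection (assumed shape, mirroring common.RandRange(endIdx, len(channels))):

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickIndex picks among the top-priority run [0, endIdx) normally; with
// ignoreFirstPriority it falls through to [endIdx, total) when lower
// priorities exist, matching common.RandRange(endIdx, total).
func pickIndex(endIdx, total int, ignoreFirstPriority bool) int {
	idx := rand.Intn(endIdx)
	if ignoreFirstPriority && endIdx < total {
		idx = endIdx + rand.Intn(total-endIdx)
	}
	return idx
}

func main() {
	fmt.Println("first attempt:", pickIndex(2, 5, false)) // 0 or 1
	fmt.Println("retry:", pickIndex(2, 5, true))          // 2, 3, or 4
}
```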
@@ -3,17 +3,17 @@ package model
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 )
 
 type Channel struct {
 	Id                 int     `json:"id"`
 	Type               int     `json:"type" gorm:"default:0"`
-	Key                string  `json:"key" gorm:"not null;index"`
+	Key                string  `json:"key" gorm:"type:text"`
 	Status             int     `json:"status" gorm:"default:1"`
 	Name               string  `json:"name" gorm:"index"`
 	Weight             *uint   `json:"weight" gorm:"default:0"`
@@ -21,7 +21,7 @@ type Channel struct {
 	TestTime           int64   `json:"test_time" gorm:"bigint"`
 	ResponseTime       int     `json:"response_time"` // in milliseconds
 	BaseURL            *string `json:"base_url" gorm:"column:base_url;default:''"`
-	Other              string  `json:"other"`
+	Other              string  `json:"other"`   // DEPRECATED: please save config to field Config
 	Balance            float64 `json:"balance"` // in USD
 	BalanceUpdatedTime int64   `json:"balance_updated_time" gorm:"bigint"`
 	Models             string  `json:"models"`
@@ -29,25 +29,25 @@ type Channel struct {
 	UsedQuota          int64   `json:"used_quota" gorm:"bigint;default:0"`
 	ModelMapping       *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
 	Priority           *int64  `json:"priority" gorm:"bigint;default:0"`
+	Config             string  `json:"config"`
 }
 
-func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
+func GetAllChannels(startIdx int, num int, scope string) ([]*Channel, error) {
 	var channels []*Channel
 	var err error
-	if selectAll {
+	switch scope {
+	case "all":
 		err = DB.Order("id desc").Find(&channels).Error
-	} else {
+	case "disabled":
+		err = DB.Order("id desc").Where("status = ? or status = ?", common.ChannelStatusAutoDisabled, common.ChannelStatusManuallyDisabled).Find(&channels).Error
+	default:
 		err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("key").Find(&channels).Error
 	}
 	return channels, err
 }
 
 func SearchChannels(keyword string) (channels []*Channel, err error) {
-	keyCol := "`key`"
-	if common.UsingPostgreSQL {
-		keyCol = `"key"`
-	}
-	err = DB.Omit("key").Where("id = ? or name LIKE ? or "+keyCol+" = ?", helper.String2Int(keyword), keyword+"%", keyword).Find(&channels).Error
+	err = DB.Omit("key").Where("id = ? or name LIKE ?", helper.String2Int(keyword), keyword+"%").Find(&channels).Error
 	return channels, err
 }
 
@@ -155,6 +155,18 @@ func (channel *Channel) Delete() error {
 	return err
 }
 
+func (channel *Channel) LoadConfig() (map[string]string, error) {
+	if channel.Config == "" {
+		return nil, nil
+	}
+	cfg := make(map[string]string)
+	err := json.Unmarshal([]byte(channel.Config), &cfg)
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
 func UpdateChannelStatusById(id int, status int) {
 	err := UpdateAbilityStatus(id, status == common.ChannelStatusEnabled)
 	if err != nil {
@@ -166,7 +178,7 @@ func UpdateChannelStatusById(id int, status int) {
 	}
 }
 
-func UpdateChannelUsedQuota(id int, quota int) {
+func UpdateChannelUsedQuota(id int, quota int64) {
 	if config.BatchUpdateEnabled {
 		addNewRecord(BatchUpdateTypeChannelUsedQuota, id, quota)
 		return
@@ -174,7 +186,7 @@ func UpdateChannelUsedQuota(id int, quota int) {
 	updateChannelUsedQuota(id, quota)
 }
 
-func updateChannelUsedQuota(id int, quota int) {
+func updateChannelUsedQuota(id int, quota int64) {
 	err := DB.Model(&Channel{}).Where("id = ?", id).Update("used_quota", gorm.Expr("used_quota + ?", quota)).Error
 	if err != nil {
 		logger.SysError("failed to update channel used quota: " + err.Error())
 
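The new Config column supersedes the overloaded Other field: per-channel settings live in one flat JSON object of string values, which LoadConfig decodes and the distributor copies into the gin context under common.ConfigKeyPrefix. A standalone sketch of the decode step, with an assumed example payload (not taken from the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// loadConfig mirrors Channel.LoadConfig: an empty column yields no config,
// anything else must be a flat JSON object of strings.
func loadConfig(raw string) (map[string]string, error) {
	if raw == "" {
		return nil, nil
	}
	cfg := make(map[string]string)
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	cfg, err := loadConfig(`{"api_version": "2023-05-15"}`) // hypothetical payload
	if err != nil {
		panic(err)
	}
	for k, v := range cfg {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```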
model/log.go (14 changes)
@@ -3,10 +3,10 @@ package model
 import (
 	"context"
 	"fmt"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 
 	"gorm.io/gorm"
 )
@@ -51,7 +51,7 @@ func RecordLog(userId int, logType int, content string) {
 	}
 }
 
-func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string) {
+func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int64, content string) {
 	logger.Info(ctx, fmt.Sprintf("record consume log: userId=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
 	if !config.LogConsumeEnabled {
 		return
@@ -66,7 +66,7 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
 		CompletionTokens: completionTokens,
 		TokenName:        tokenName,
 		ModelName:        modelName,
-		Quota:            quota,
+		Quota:            int(quota),
 		ChannelId:        channelId,
 	}
 	err := DB.Create(log).Error
@@ -137,7 +137,7 @@ func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
 	return logs, err
 }
 
-func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int) {
+func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int64) {
 	tx := DB.Table("logs").Select("ifnull(sum(quota),0)")
 	if username != "" {
 		tx = tx.Where("username = ?", username)
 
@@ -2,14 +2,15 @@ package model
 
 import (
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/env"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 	"gorm.io/driver/mysql"
 	"gorm.io/driver/postgres"
 	"gorm.io/driver/sqlite"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 	"os"
 	"strings"
 	"time"
@@ -56,6 +57,7 @@ func chooseDB() (*gorm.DB, error) {
 		}
 		// Use MySQL
 		logger.SysLog("using MySQL as database")
+		common.UsingMySQL = true
 		return gorm.Open(mysql.Open(dsn), &gorm.Config{
 			PrepareStmt: true, // precompile SQL
 		})
@@ -72,7 +74,7 @@ func chooseDB() (*gorm.DB, error) {
 func InitDB() (err error) {
 	db, err := chooseDB()
 	if err == nil {
-		if config.DebugEnabled {
+		if config.DebugSQLEnabled {
 			db = db.Debug()
 		}
 		DB = db
@@ -80,13 +82,16 @@ func InitDB() (err error) {
 		if err != nil {
 			return err
 		}
-		sqlDB.SetMaxIdleConns(helper.GetOrDefaultEnvInt("SQL_MAX_IDLE_CONNS", 100))
-		sqlDB.SetMaxOpenConns(helper.GetOrDefaultEnvInt("SQL_MAX_OPEN_CONNS", 1000))
-		sqlDB.SetConnMaxLifetime(time.Second * time.Duration(helper.GetOrDefaultEnvInt("SQL_MAX_LIFETIME", 60)))
+		sqlDB.SetMaxIdleConns(env.Int("SQL_MAX_IDLE_CONNS", 100))
+		sqlDB.SetMaxOpenConns(env.Int("SQL_MAX_OPEN_CONNS", 1000))
+		sqlDB.SetConnMaxLifetime(time.Second * time.Duration(env.Int("SQL_MAX_LIFETIME", 60)))
 
 		if !config.IsMasterNode {
 			return nil
 		}
+		if common.UsingMySQL {
+			_, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
+		}
 		logger.SysLog("database migration started")
 		err = db.AutoMigrate(&Channel{})
 		if err != nil {
 
@@ -1,9 +1,9 @@
 package model
 
 import (
-	"one-api/common/config"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"strconv"
 	"strings"
 	"time"
 )
@@ -57,13 +57,15 @@ func InitOptionMap() {
 	config.OptionMap["WeChatServerAddress"] = ""
 	config.OptionMap["WeChatServerToken"] = ""
 	config.OptionMap["WeChatAccountQRCodeImageURL"] = ""
+	config.OptionMap["MessagePusherAddress"] = ""
+	config.OptionMap["MessagePusherToken"] = ""
 	config.OptionMap["TurnstileSiteKey"] = ""
 	config.OptionMap["TurnstileSecretKey"] = ""
-	config.OptionMap["QuotaForNewUser"] = strconv.Itoa(config.QuotaForNewUser)
-	config.OptionMap["QuotaForInviter"] = strconv.Itoa(config.QuotaForInviter)
-	config.OptionMap["QuotaForInvitee"] = strconv.Itoa(config.QuotaForInvitee)
-	config.OptionMap["QuotaRemindThreshold"] = strconv.Itoa(config.QuotaRemindThreshold)
-	config.OptionMap["PreConsumedQuota"] = strconv.Itoa(config.PreConsumedQuota)
+	config.OptionMap["QuotaForNewUser"] = strconv.FormatInt(config.QuotaForNewUser, 10)
+	config.OptionMap["QuotaForInviter"] = strconv.FormatInt(config.QuotaForInviter, 10)
+	config.OptionMap["QuotaForInvitee"] = strconv.FormatInt(config.QuotaForInvitee, 10)
+	config.OptionMap["QuotaRemindThreshold"] = strconv.FormatInt(config.QuotaRemindThreshold, 10)
+	config.OptionMap["PreConsumedQuota"] = strconv.FormatInt(config.PreConsumedQuota, 10)
 	config.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
 	config.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
 	config.OptionMap["CompletionRatio"] = common.CompletionRatio2JSONString()
@@ -79,6 +81,9 @@ func InitOptionMap() {
 func loadOptionsFromDatabase() {
 	options, _ := AllOption()
 	for _, option := range options {
+		if option.Key == "ModelRatio" {
+			option.Value = common.AddNewMissingRatio(option.Value)
+		}
 		err := updateOptionMap(option.Key, option.Value)
 		if err != nil {
 			logger.SysError("failed to update option map: " + err.Error())
@@ -179,20 +184,24 @@ func updateOptionMap(key string, value string) (err error) {
 		config.WeChatServerToken = value
 	case "WeChatAccountQRCodeImageURL":
 		config.WeChatAccountQRCodeImageURL = value
+	case "MessagePusherAddress":
+		config.MessagePusherAddress = value
+	case "MessagePusherToken":
+		config.MessagePusherToken = value
 	case "TurnstileSiteKey":
 		config.TurnstileSiteKey = value
 	case "TurnstileSecretKey":
 		config.TurnstileSecretKey = value
 	case "QuotaForNewUser":
-		config.QuotaForNewUser, _ = strconv.Atoi(value)
+		config.QuotaForNewUser, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaForInviter":
-		config.QuotaForInviter, _ = strconv.Atoi(value)
+		config.QuotaForInviter, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaForInvitee":
-		config.QuotaForInvitee, _ = strconv.Atoi(value)
+		config.QuotaForInvitee, _ = strconv.ParseInt(value, 10, 64)
 	case "QuotaRemindThreshold":
-		config.QuotaRemindThreshold, _ = strconv.Atoi(value)
+		config.QuotaRemindThreshold, _ = strconv.ParseInt(value, 10, 64)
 	case "PreConsumedQuota":
-		config.PreConsumedQuota, _ = strconv.Atoi(value)
+		config.PreConsumedQuota, _ = strconv.ParseInt(value, 10, 64)
 	case "RetryTimes":
 		config.RetryTimes, _ = strconv.Atoi(value)
 	case "ModelRatio":
 
@@ -3,9 +3,9 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/helper"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/helper"
 )
 
 type Redemption struct {
@@ -14,7 +14,7 @@ type Redemption struct {
 	Key          string `json:"key" gorm:"type:char(32);uniqueIndex"`
 	Status       int    `json:"status" gorm:"default:1"`
 	Name         string `json:"name" gorm:"index"`
-	Quota        int    `json:"quota" gorm:"default:100"`
+	Quota        int64  `json:"quota" gorm:"default:100"`
 	CreatedTime  int64  `json:"created_time" gorm:"bigint"`
 	RedeemedTime int64  `json:"redeemed_time" gorm:"bigint"`
 	Count        int    `json:"count" gorm:"-:all"` // only for api request
@@ -42,7 +42,7 @@ func GetRedemptionById(id int) (*Redemption, error) {
 	return &redemption, err
 }
 
-func Redeem(key string, userId int) (quota int, err error) {
+func Redeem(key string, userId int) (quota int64, err error) {
 	if key == "" {
 		return 0, errors.New("未提供兑换码")
 	}
 
@@ -3,11 +3,12 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/message"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 )
 
 type Token struct {
@@ -19,9 +20,9 @@ type Token struct {
 	CreatedTime    int64  `json:"created_time" gorm:"bigint"`
 	AccessedTime   int64  `json:"accessed_time" gorm:"bigint"`
 	ExpiredTime    int64  `json:"expired_time" gorm:"bigint;default:-1"` // -1 means never expired
-	RemainQuota    int    `json:"remain_quota" gorm:"default:0"`
+	RemainQuota    int64  `json:"remain_quota" gorm:"default:0"`
 	UnlimitedQuota bool   `json:"unlimited_quota" gorm:"default:false"`
-	UsedQuota      int    `json:"used_quota" gorm:"default:0"` // used quota
+	UsedQuota      int64  `json:"used_quota" gorm:"default:0"` // used quota
 }
 
 func GetAllUserTokens(userId int, startIdx int, num int) ([]*Token, error) {
@@ -137,7 +138,7 @@ func DeleteTokenById(id int, userId int) (err error) {
 	return token.Delete()
 }
 
-func IncreaseTokenQuota(id int, quota int) (err error) {
+func IncreaseTokenQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -148,7 +149,7 @@ func IncreaseTokenQuota(id int, quota int) (err error) {
 	return increaseTokenQuota(id, quota)
 }
 
-func increaseTokenQuota(id int, quota int) (err error) {
+func increaseTokenQuota(id int, quota int64) (err error) {
 	err = DB.Model(&Token{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"remain_quota":  gorm.Expr("remain_quota + ?", quota),
@@ -159,7 +160,7 @@ func increaseTokenQuota(id int, quota int) (err error) {
 	return err
 }
 
-func DecreaseTokenQuota(id int, quota int) (err error) {
+func DecreaseTokenQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -170,7 +171,7 @@ func DecreaseTokenQuota(id int, quota int) (err error) {
 	return decreaseTokenQuota(id, quota)
 }
 
-func decreaseTokenQuota(id int, quota int) (err error) {
+func decreaseTokenQuota(id int, quota int64) (err error) {
 	err = DB.Model(&Token{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"remain_quota":  gorm.Expr("remain_quota - ?", quota),
@@ -181,7 +182,7 @@ func decreaseTokenQuota(id int, quota int) (err error) {
 	return err
 }
 
-func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
+func PreConsumeTokenQuota(tokenId int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -213,7 +214,7 @@ func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
 			}
 			if email != "" {
 				topUpLink := fmt.Sprintf("%s/topup", config.ServerAddress)
-				err = common.SendEmail(prompt, email,
+				err = message.SendEmail(prompt, email,
 					fmt.Sprintf("%s,当前剩余额度为 %d,为了不影响您的使用,请及时充值。<br/>充值链接:<a href='%s'>%s</a>", prompt, userQuota, topUpLink, topUpLink))
 				if err != nil {
 					logger.SysError("failed to send email" + err.Error())
@@ -231,7 +232,7 @@ func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
 	return err
 }
 
-func PostConsumeTokenQuota(tokenId int, quota int) (err error) {
+func PostConsumeTokenQuota(tokenId int, quota int64) (err error) {
 	token, err := GetTokenById(tokenId)
 	if quota > 0 {
 		err = DecreaseUserQuota(token.UserId, quota)
 
@@ -3,11 +3,12 @@ package model
 import (
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/blacklist"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/logger"
 	"gorm.io/gorm"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/logger"
 	"strings"
 )
 
@@ -25,8 +26,8 @@ type User struct {
 	WeChatId         string `json:"wechat_id" gorm:"column:wechat_id;index"`
 	VerificationCode string `json:"verification_code" gorm:"-:all"`                                    // this field is only for Email verification, don't save it to database!
 	AccessToken      string `json:"access_token" gorm:"type:char(32);column:access_token;uniqueIndex"` // this token is for system management
-	Quota            int    `json:"quota" gorm:"type:int;default:0"`
-	UsedQuota        int    `json:"used_quota" gorm:"type:int;default:0;column:used_quota"` // used quota
+	Quota            int64  `json:"quota" gorm:"type:int;default:0"`
+	UsedQuota        int64  `json:"used_quota" gorm:"type:int;default:0;column:used_quota"` // used quota
 	RequestCount     int    `json:"request_count" gorm:"type:int;default:0;"`               // request number
 	Group            string `json:"group" gorm:"type:varchar(32);default:'default'"`
 	AffCode          string `json:"aff_code" gorm:"type:varchar(32);column:aff_code;uniqueIndex"`
@@ -40,7 +41,7 @@ func GetMaxUserId() int {
 }
 
 func GetAllUsers(startIdx int, num int) (users []*User, err error) {
-	err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("password").Find(&users).Error
+	err = DB.Order("id desc").Limit(num).Offset(startIdx).Omit("password").Where("status != ?", common.UserStatusDeleted).Find(&users).Error
 	return users, err
 }
 
@@ -123,6 +124,11 @@ func (user *User) Update(updatePassword bool) error {
 			return err
 		}
 	}
+	if user.Status == common.UserStatusDisabled {
+		blacklist.BanUser(user.Id)
+	} else if user.Status == common.UserStatusEnabled {
+		blacklist.UnbanUser(user.Id)
+	}
 	err = DB.Model(user).Updates(user).Error
 	return err
 }
@@ -131,7 +137,10 @@ func (user *User) Delete() error {
 	if user.Id == 0 {
 		return errors.New("id 为空!")
 	}
-	err := DB.Delete(user).Error
+	blacklist.BanUser(user.Id)
+	user.Username = fmt.Sprintf("deleted_%s", helper.GetUUID())
+	user.Status = common.UserStatusDeleted
+	err := DB.Model(user).Updates(user).Error
 	return err
 }
 
@@ -265,12 +274,12 @@ func ValidateAccessToken(token string) (user *User) {
 	return nil
 }
 
-func GetUserQuota(id int) (quota int, err error) {
+func GetUserQuota(id int) (quota int64, err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Select("quota").Find(&quota).Error
 	return quota, err
 }
 
-func GetUserUsedQuota(id int) (quota int, err error) {
+func GetUserUsedQuota(id int) (quota int64, err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Select("used_quota").Find(&quota).Error
 	return quota, err
 }
@@ -290,7 +299,7 @@ func GetUserGroup(id int) (group string, err error) {
 	return group, err
 }
 
-func IncreaseUserQuota(id int, quota int) (err error) {
+func IncreaseUserQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -301,12 +310,12 @@ func IncreaseUserQuota(id int, quota int) (err error) {
 	return increaseUserQuota(id, quota)
 }
 
-func increaseUserQuota(id int, quota int) (err error) {
+func increaseUserQuota(id int, quota int64) (err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Update("quota", gorm.Expr("quota + ?", quota)).Error
 	return err
 }
 
-func DecreaseUserQuota(id int, quota int) (err error) {
+func DecreaseUserQuota(id int, quota int64) (err error) {
 	if quota < 0 {
 		return errors.New("quota 不能为负数!")
 	}
@@ -317,7 +326,7 @@ func DecreaseUserQuota(id int, quota int) (err error) {
 	return decreaseUserQuota(id, quota)
 }
 
-func decreaseUserQuota(id int, quota int) (err error) {
+func decreaseUserQuota(id int, quota int64) (err error) {
 	err = DB.Model(&User{}).Where("id = ?", id).Update("quota", gorm.Expr("quota - ?", quota)).Error
 	return err
 }
@@ -327,7 +336,7 @@ func GetRootUserEmail() (email string) {
 	return email
 }
 
-func UpdateUserUsedQuotaAndRequestCount(id int, quota int) {
+func UpdateUserUsedQuotaAndRequestCount(id int, quota int64) {
 	if config.BatchUpdateEnabled {
 		addNewRecord(BatchUpdateTypeUsedQuota, id, quota)
 		addNewRecord(BatchUpdateTypeRequestCount, id, 1)
@@ -336,7 +345,7 @@ func UpdateUserUsedQuotaAndRequestCount(id int, quota int) {
 	updateUserUsedQuotaAndRequestCount(id, quota, 1)
 }
 
-func updateUserUsedQuotaAndRequestCount(id int, quota int, count int) {
+func updateUserUsedQuotaAndRequestCount(id int, quota int64, count int) {
 	err := DB.Model(&User{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"used_quota":    gorm.Expr("used_quota + ?", quota),
@@ -348,7 +357,7 @@ func updateUserUsedQuotaAndRequestCount(id int, quota int, count int) {
 	}
 }
 
-func updateUserUsedQuota(id int, quota int) {
+func updateUserUsedQuota(id int, quota int64) {
 	err := DB.Model(&User{}).Where("id = ?", id).Updates(
 		map[string]interface{}{
 			"used_quota": gorm.Expr("used_quota + ?", quota),
 
@@ -1,8 +1,8 @@
 package model
 
 import (
-	"one-api/common/config"
-	"one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"sync"
 	"time"
 )
@@ -16,12 +16,12 @@ const (
 	BatchUpdateTypeCount // if you add a new type, you need to add a new map and a new lock
 )
 
-var batchUpdateStores []map[int]int
+var batchUpdateStores []map[int]int64
 var batchUpdateLocks []sync.Mutex
 
 func init() {
 	for i := 0; i < BatchUpdateTypeCount; i++ {
-		batchUpdateStores = append(batchUpdateStores, make(map[int]int))
+		batchUpdateStores = append(batchUpdateStores, make(map[int]int64))
 		batchUpdateLocks = append(batchUpdateLocks, sync.Mutex{})
 	}
}
@@ -35,7 +35,7 @@ func InitBatchUpdater() {
 	}()
 }
 
-func addNewRecord(type_ int, id int, value int) {
+func addNewRecord(type_ int, id int, value int64) {
 	batchUpdateLocks[type_].Lock()
 	defer batchUpdateLocks[type_].Unlock()
 	if _, ok := batchUpdateStores[type_][id]; !ok {
@@ -50,7 +50,7 @@ func batchUpdate() {
 	for i := 0; i < BatchUpdateTypeCount; i++ {
 		batchUpdateLocks[i].Lock()
 		store := batchUpdateStores[i]
-		batchUpdateStores[i] = make(map[int]int)
+		batchUpdateStores[i] = make(map[int]int64)
 		batchUpdateLocks[i].Unlock()
 		// TODO: maybe we can combine updates with same key?
 		for key, value := range store {
@@ -68,7 +68,7 @@ func batchUpdate() {
 			case BatchUpdateTypeUsedQuota:
 				updateUserUsedQuota(key, value)
 			case BatchUpdateTypeRequestCount:
-				updateUserRequestCount(key, value)
+				updateUserRequestCount(key, int(value))
 			case BatchUpdateTypeChannelUsedQuota:
 				updateChannelUsedQuota(key, value)
 			}
 
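The batch updater's stores simply switch from map[int]int to map[int]int64 so accumulated quota deltas match the new int64 quota columns. The flush pattern itself is unchanged: deltas accumulate under a lock, and the ticker swaps the map out so each id costs one DB write per interval. A compact sketch of that accumulate-and-swap pattern (illustrative, not the package's exact code):

```go
package main

import (
	"fmt"
	"sync"
)

type batcher struct {
	mu    sync.Mutex
	store map[int]int64
}

// add accumulates a delta for id; contention is limited to a map write.
func (b *batcher) add(id int, delta int64) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.store[id] += delta
}

// flush swaps the map out under the lock, then applies the accumulated
// deltas outside it, so writers are never blocked on the database.
func (b *batcher) flush(apply func(id int, delta int64)) {
	b.mu.Lock()
	store := b.store
	b.store = make(map[int]int64)
	b.mu.Unlock()
	for id, delta := range store {
		apply(id, delta)
	}
}

func main() {
	b := &batcher{store: make(map[int]int64)}
	b.add(1, 50)
	b.add(1, 25)
	b.flush(func(id int, d int64) { fmt.Printf("used_quota[%d] += %d\n", id, d) })
}
```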
							
								
								
									
55  monitor/channel.go  (Normal file)
@@ -0,0 +1,55 @@
package monitor

import (
	"fmt"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/common/message"
	"github.com/songquanpeng/one-api/model"
)

func notifyRootUser(subject string, content string) {
	if config.MessagePusherAddress != "" {
		err := message.SendMessage(subject, content, content)
		if err != nil {
			logger.SysError(fmt.Sprintf("failed to send message: %s", err.Error()))
		} else {
			return
		}
	}
	if config.RootUserEmail == "" {
		config.RootUserEmail = model.GetRootUserEmail()
	}
	err := message.SendEmail(subject, config.RootUserEmail, content)
	if err != nil {
		logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
	}
}

// DisableChannel disable & notify
func DisableChannel(channelId int, channelName string, reason string) {
	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
	logger.SysLog(fmt.Sprintf("channel #%d has been disabled: %s", channelId, reason))
	subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
	content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
	notifyRootUser(subject, content)
}

func MetricDisableChannel(channelId int, successRate float64) {
	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
	logger.SysLog(fmt.Sprintf("channel #%d has been disabled due to low success rate: %.2f", channelId, successRate*100))
	subject := fmt.Sprintf("通道 #%d 已被禁用", channelId)
	content := fmt.Sprintf("该渠道在最近 %d 次调用中成功率为 %.2f%%,低于阈值 %.2f%%,因此被系统自动禁用。",
		config.MetricQueueSize, successRate*100, config.MetricSuccessRateThreshold*100)
	notifyRootUser(subject, content)
}

// EnableChannel enable & notify
func EnableChannel(channelId int, channelName string) {
	model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled)
	logger.SysLog(fmt.Sprintf("channel #%d has been enabled", channelId))
	subject := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
	content := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
	notifyRootUser(subject, content)
}
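For context, a hedged sketch of how these helpers might be called from channel health-check code; the wrapper function below is illustrative and not part of this diff.

package example

import "github.com/songquanpeng/one-api/monitor"

// afterChannelTest disables a channel on failure and re-enables it on
// success, letting notifyRootUser fan the notice out to the push service
// or to the root user's email.
func afterChannelTest(channelId int, channelName string, err error) {
	if err != nil {
		monitor.DisableChannel(channelId, channelName, err.Error())
		return
	}
	monitor.EnableChannel(channelId, channelName)
}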
							
								
								
									
79  monitor/metric.go  (Normal file)
@@ -0,0 +1,79 @@
package monitor

import (
	"github.com/songquanpeng/one-api/common/config"
)

var store = make(map[int][]bool)
var metricSuccessChan = make(chan int, config.MetricSuccessChanSize)
var metricFailChan = make(chan int, config.MetricFailChanSize)

func consumeSuccess(channelId int) {
	if len(store[channelId]) > config.MetricQueueSize {
		store[channelId] = store[channelId][1:]
	}
	store[channelId] = append(store[channelId], true)
}

func consumeFail(channelId int) (bool, float64) {
	if len(store[channelId]) > config.MetricQueueSize {
		store[channelId] = store[channelId][1:]
	}
	store[channelId] = append(store[channelId], false)
	successCount := 0
	for _, success := range store[channelId] {
		if success {
			successCount++
		}
	}
	successRate := float64(successCount) / float64(len(store[channelId]))
	if len(store[channelId]) < config.MetricQueueSize {
		return false, successRate
	}
	if successRate < config.MetricSuccessRateThreshold {
		store[channelId] = make([]bool, 0)
		return true, successRate
	}
	return false, successRate
}

func metricSuccessConsumer() {
	for {
		select {
		case channelId := <-metricSuccessChan:
			consumeSuccess(channelId)
		}
	}
}

func metricFailConsumer() {
	for {
		select {
		case channelId := <-metricFailChan:
			disable, successRate := consumeFail(channelId)
			if disable {
				go MetricDisableChannel(channelId, successRate)
			}
		}
	}
}

func init() {
	if config.EnableMetric {
		go metricSuccessConsumer()
		go metricFailConsumer()
	}
}

func Emit(channelId int, success bool) {
	if !config.EnableMetric {
		return
	}
	go func() {
		if success {
			metricSuccessChan <- channelId
		} else {
			metricFailChan <- channelId
		}
	}()
}
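The exported entry point here is Emit: relay code reports one success or failure per upstream call, and once a channel's sliding window of config.MetricQueueSize results falls below config.MetricSuccessRateThreshold the fail consumer triggers MetricDisableChannel. A hedged usage sketch follows; the wrapper function is illustrative, not code from this change.

package example

import "github.com/songquanpeng/one-api/monitor"

// relayWithMetrics reports the outcome of each upstream call so the
// fail consumer can compute the channel's recent success rate.
func relayWithMetrics(channelId int, doRelay func() error) error {
	err := doRelay()
	monitor.Emit(channelId, err == nil)
	return err
}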
							
								
								
									
8  relay/channel/ai360/constants.go  (Normal file)
@@ -0,0 +1,8 @@
package ai360

var ModelList = []string{
	"360GPT_S2_V9",
	"embedding-bert-512-v1",
	"embedding_s1_v1",
	"semantic_similarity_s1_v1",
}
@@ -1,22 +1,60 @@
 | 
			
		||||
package aiproxy
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/util"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type Adaptor struct {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) Auth(c *gin.Context) error {
 | 
			
		||||
func (a *Adaptor) Init(meta *util.RelayMeta) {
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
 | 
			
		||||
	return fmt.Sprintf("%s/api/library/ask", meta.BaseURL), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
 | 
			
		||||
	channel.SetupCommonRequestHeader(c, req, meta)
 | 
			
		||||
	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	return nil, nil
 | 
			
		||||
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	if request == nil {
 | 
			
		||||
		return nil, errors.New("request is nil")
 | 
			
		||||
	}
 | 
			
		||||
	aiProxyLibraryRequest := ConvertRequest(*request)
 | 
			
		||||
	aiProxyLibraryRequest.LibraryId = c.GetString(common.ConfigKeyLibraryID)
 | 
			
		||||
	return aiProxyLibraryRequest, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
 | 
			
		||||
	return nil, nil, nil
 | 
			
		||||
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
 | 
			
		||||
	return channel.DoRequestHelper(a, c, meta, requestBody)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		err, usage = StreamHandler(c, resp)
 | 
			
		||||
	} else {
 | 
			
		||||
		err, usage = Handler(c, resp)
 | 
			
		||||
	}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetModelList() []string {
 | 
			
		||||
	return ModelList
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetChannelName() string {
 | 
			
		||||
	return "aiproxy"
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
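The rewritten adaptors above (and the ones that follow) all expose the same method set, which suggests a shared channel adaptor interface roughly like the sketch below. The interface name and its exact location under relay/channel are assumptions; the method signatures are taken from the diff.

package channel

import (
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

// Adaptor is a sketch of the interface the per-channel adaptors appear to
// implement after this refactor; the name is an assumption, the methods are not.
type Adaptor interface {
	Init(meta *util.RelayMeta)
	GetRequestURL(meta *util.RelayMeta) (string, error)
	SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
	DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (*model.Usage, *model.ErrorWithStatusCode)
	GetModelList() []string
	GetChannelName() string
}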
							
								
								
									
9  relay/channel/aiproxy/constants.go  (Normal file)
@@ -0,0 +1,9 @@
package aiproxy

import "github.com/songquanpeng/one-api/relay/channel/openai"

var ModelList = []string{""}

func init() {
	ModelList = openai.ModelList
}
@@ -5,20 +5,21 @@ import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/helper"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/logger"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel/openai"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/constant"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/common"
 | 
			
		||||
	"one-api/common/helper"
 | 
			
		||||
	"one-api/common/logger"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
	"one-api/relay/constant"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答
 | 
			
		||||
 | 
			
		||||
func ConvertRequest(request openai.GeneralOpenAIRequest) *LibraryRequest {
 | 
			
		||||
func ConvertRequest(request model.GeneralOpenAIRequest) *LibraryRequest {
 | 
			
		||||
	query := ""
 | 
			
		||||
	if len(request.Messages) != 0 {
 | 
			
		||||
		query = request.Messages[len(request.Messages)-1].StringContent()
 | 
			
		||||
@@ -45,14 +46,14 @@ func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextRespon
 | 
			
		||||
	content := response.Answer + aiProxyDocuments2Markdown(response.Documents)
 | 
			
		||||
	choice := openai.TextResponseChoice{
 | 
			
		||||
		Index: 0,
 | 
			
		||||
		Message: openai.Message{
 | 
			
		||||
		Message: model.Message{
 | 
			
		||||
			Role:    "assistant",
 | 
			
		||||
			Content: content,
 | 
			
		||||
		},
 | 
			
		||||
		FinishReason: "stop",
 | 
			
		||||
	}
 | 
			
		||||
	fullTextResponse := openai.TextResponse{
 | 
			
		||||
		Id:      helper.GetUUID(),
 | 
			
		||||
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 | 
			
		||||
		Object:  "chat.completion",
 | 
			
		||||
		Created: helper.GetTimestamp(),
 | 
			
		||||
		Choices: []openai.TextResponseChoice{choice},
 | 
			
		||||
@@ -65,7 +66,7 @@ func documentsAIProxyLibrary(documents []LibraryDocument) *openai.ChatCompletion
 | 
			
		||||
	choice.Delta.Content = aiProxyDocuments2Markdown(documents)
 | 
			
		||||
	choice.FinishReason = &constant.StopFinishReason
 | 
			
		||||
	return &openai.ChatCompletionsStreamResponse{
 | 
			
		||||
		Id:      helper.GetUUID(),
 | 
			
		||||
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 | 
			
		||||
		Object:  "chat.completion.chunk",
 | 
			
		||||
		Created: helper.GetTimestamp(),
 | 
			
		||||
		Model:   "",
 | 
			
		||||
@@ -77,7 +78,7 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 | 
			
		||||
	var choice openai.ChatCompletionsStreamResponseChoice
 | 
			
		||||
	choice.Delta.Content = response.Content
 | 
			
		||||
	return &openai.ChatCompletionsStreamResponse{
 | 
			
		||||
		Id:      helper.GetUUID(),
 | 
			
		||||
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 | 
			
		||||
		Object:  "chat.completion.chunk",
 | 
			
		||||
		Created: helper.GetTimestamp(),
 | 
			
		||||
		Model:   response.Model,
 | 
			
		||||
@@ -85,8 +86,8 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
	var usage openai.Usage
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	var usage model.Usage
 | 
			
		||||
	scanner := bufio.NewScanner(resp.Body)
 | 
			
		||||
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 | 
			
		||||
		if atEOF && len(data) == 0 {
 | 
			
		||||
@@ -157,7 +158,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 | 
			
		||||
	return nil, &usage
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	var AIProxyLibraryResponse LibraryResponse
 | 
			
		||||
	responseBody, err := io.ReadAll(resp.Body)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
@@ -172,8 +173,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 | 
			
		||||
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 | 
			
		||||
	}
 | 
			
		||||
	if AIProxyLibraryResponse.ErrCode != 0 {
 | 
			
		||||
		return &openai.ErrorWithStatusCode{
 | 
			
		||||
			Error: openai.Error{
 | 
			
		||||
		return &model.ErrorWithStatusCode{
 | 
			
		||||
			Error: model.Error{
 | 
			
		||||
				Message: AIProxyLibraryResponse.Message,
 | 
			
		||||
				Type:    strconv.Itoa(AIProxyLibraryResponse.ErrCode),
 | 
			
		||||
				Code:    AIProxyLibraryResponse.ErrCode,
 | 
			
		||||
@@ -189,5 +190,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 | 
			
		||||
	c.Writer.Header().Set("Content-Type", "application/json")
 | 
			
		||||
	c.Writer.WriteHeader(resp.StatusCode)
 | 
			
		||||
	_, err = c.Writer.Write(jsonResponse)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return openai.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError), nil
 | 
			
		||||
	}
 | 
			
		||||
	return nil, &fullTextResponse.Usage
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -1,22 +1,86 @@
 | 
			
		||||
package ali
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/constant"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/util"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// https://help.aliyun.com/zh/dashscope/developer-reference/api-details
 | 
			
		||||
 | 
			
		||||
type Adaptor struct {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) Auth(c *gin.Context) error {
 | 
			
		||||
func (a *Adaptor) Init(meta *util.RelayMeta) {
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
 | 
			
		||||
	fullRequestURL := fmt.Sprintf("%s/api/v1/services/aigc/text-generation/generation", meta.BaseURL)
 | 
			
		||||
	if meta.Mode == constant.RelayModeEmbeddings {
 | 
			
		||||
		fullRequestURL = fmt.Sprintf("%s/api/v1/services/embeddings/text-embedding/text-embedding", meta.BaseURL)
 | 
			
		||||
	}
 | 
			
		||||
	return fullRequestURL, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
 | 
			
		||||
	channel.SetupCommonRequestHeader(c, req, meta)
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		req.Header.Set("Accept", "text/event-stream")
 | 
			
		||||
	}
 | 
			
		||||
	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		req.Header.Set("X-DashScope-SSE", "enable")
 | 
			
		||||
	}
 | 
			
		||||
	if c.GetString(common.ConfigKeyPlugin) != "" {
 | 
			
		||||
		req.Header.Set("X-DashScope-Plugin", c.GetString(common.ConfigKeyPlugin))
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	return nil, nil
 | 
			
		||||
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	if request == nil {
 | 
			
		||||
		return nil, errors.New("request is nil")
 | 
			
		||||
	}
 | 
			
		||||
	switch relayMode {
 | 
			
		||||
	case constant.RelayModeEmbeddings:
 | 
			
		||||
		baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
 | 
			
		||||
		return baiduEmbeddingRequest, nil
 | 
			
		||||
	default:
 | 
			
		||||
		baiduRequest := ConvertRequest(*request)
 | 
			
		||||
		return baiduRequest, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
 | 
			
		||||
	return nil, nil, nil
 | 
			
		||||
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
 | 
			
		||||
	return channel.DoRequestHelper(a, c, meta, requestBody)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		err, usage = StreamHandler(c, resp)
 | 
			
		||||
	} else {
 | 
			
		||||
		switch meta.Mode {
 | 
			
		||||
		case constant.RelayModeEmbeddings:
 | 
			
		||||
			err, usage = EmbeddingHandler(c, resp)
 | 
			
		||||
		default:
 | 
			
		||||
			err, usage = Handler(c, resp)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetModelList() []string {
 | 
			
		||||
	return ModelList
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetChannelName() string {
 | 
			
		||||
	return "ali"
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
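For a streaming DashScope call, the Ali adaptor's SetupRequestHeader ends up setting the headers shown below. The helper is only an illustration of what the adaptor produces; apiKey and plugin are placeholders.

package example

import "net/http"

// buildAliStreamHeaders mirrors the header setup in the Ali adaptor for a
// streaming request: SSE is negotiated via Accept and X-DashScope-SSE, and
// an optional plugin id is forwarded through X-DashScope-Plugin.
func buildAliStreamHeaders(req *http.Request, apiKey string, plugin string) {
	req.Header.Set("Accept", "text/event-stream")
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("X-DashScope-SSE", "enable")
	if plugin != "" {
		req.Header.Set("X-DashScope-Plugin", plugin)
	}
}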
							
								
								
									
6  relay/channel/ali/constants.go  (Normal file)
@@ -0,0 +1,6 @@
package ali

var ModelList = []string{
	"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext",
	"text-embedding-v1",
}
@@ -4,12 +4,13 @@ import (
 | 
			
		||||
	"bufio"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/helper"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/logger"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel/openai"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/common"
 | 
			
		||||
	"one-api/common/helper"
 | 
			
		||||
	"one-api/common/logger"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
@@ -17,7 +18,7 @@ import (
 | 
			
		||||
 | 
			
		||||
const EnableSearchModelSuffix = "-internet"
 | 
			
		||||
 | 
			
		||||
func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
 | 
			
		||||
func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 | 
			
		||||
	messages := make([]Message, 0, len(request.Messages))
 | 
			
		||||
	for i := 0; i < len(request.Messages); i++ {
 | 
			
		||||
		message := request.Messages[i]
 | 
			
		||||
@@ -32,6 +33,9 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
 | 
			
		||||
		enableSearch = true
 | 
			
		||||
		aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
 | 
			
		||||
	}
 | 
			
		||||
	if request.TopP >= 1 {
 | 
			
		||||
		request.TopP = 0.9999
 | 
			
		||||
	}
 | 
			
		||||
	return &ChatRequest{
 | 
			
		||||
		Model: aliModel,
 | 
			
		||||
		Input: Input{
 | 
			
		||||
@@ -40,11 +44,15 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
 | 
			
		||||
		Parameters: Parameters{
 | 
			
		||||
			EnableSearch:      enableSearch,
 | 
			
		||||
			IncrementalOutput: request.Stream,
 | 
			
		||||
			Seed:              uint64(request.Seed),
 | 
			
		||||
			MaxTokens:         request.MaxTokens,
 | 
			
		||||
			Temperature:       request.Temperature,
 | 
			
		||||
			TopP:              request.TopP,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
 | 
			
		||||
func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
 | 
			
		||||
	return &EmbeddingRequest{
 | 
			
		||||
		Model: "text-embedding-v1",
 | 
			
		||||
		Input: struct {
 | 
			
		||||
@@ -55,7 +63,7 @@ func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequ
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	var aliResponse EmbeddingResponse
 | 
			
		||||
	err := json.NewDecoder(resp.Body).Decode(&aliResponse)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
@@ -68,8 +76,8 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if aliResponse.Code != "" {
 | 
			
		||||
		return &openai.ErrorWithStatusCode{
 | 
			
		||||
			Error: openai.Error{
 | 
			
		||||
		return &model.ErrorWithStatusCode{
 | 
			
		||||
			Error: model.Error{
 | 
			
		||||
				Message: aliResponse.Message,
 | 
			
		||||
				Type:    aliResponse.Code,
 | 
			
		||||
				Param:   aliResponse.RequestId,
 | 
			
		||||
@@ -95,7 +103,7 @@ func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingR
 | 
			
		||||
		Object: "list",
 | 
			
		||||
		Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Output.Embeddings)),
 | 
			
		||||
		Model:  "text-embedding-v1",
 | 
			
		||||
		Usage:  openai.Usage{TotalTokens: response.Usage.TotalTokens},
 | 
			
		||||
		Usage:  model.Usage{TotalTokens: response.Usage.TotalTokens},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, item := range response.Output.Embeddings {
 | 
			
		||||
@@ -111,7 +119,7 @@ func embeddingResponseAli2OpenAI(response *EmbeddingResponse) *openai.EmbeddingR
 | 
			
		||||
func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
 | 
			
		||||
	choice := openai.TextResponseChoice{
 | 
			
		||||
		Index: 0,
 | 
			
		||||
		Message: openai.Message{
 | 
			
		||||
		Message: model.Message{
 | 
			
		||||
			Role:    "assistant",
 | 
			
		||||
			Content: response.Output.Text,
 | 
			
		||||
		},
 | 
			
		||||
@@ -122,7 +130,7 @@ func responseAli2OpenAI(response *ChatResponse) *openai.TextResponse {
 | 
			
		||||
		Object:  "chat.completion",
 | 
			
		||||
		Created: helper.GetTimestamp(),
 | 
			
		||||
		Choices: []openai.TextResponseChoice{choice},
 | 
			
		||||
		Usage: openai.Usage{
 | 
			
		||||
		Usage: model.Usage{
 | 
			
		||||
			PromptTokens:     response.Usage.InputTokens,
 | 
			
		||||
			CompletionTokens: response.Usage.OutputTokens,
 | 
			
		||||
			TotalTokens:      response.Usage.InputTokens + response.Usage.OutputTokens,
 | 
			
		||||
@@ -148,8 +156,8 @@ func streamResponseAli2OpenAI(aliResponse *ChatResponse) *openai.ChatCompletions
 | 
			
		||||
	return &response
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
	var usage openai.Usage
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	var usage model.Usage
 | 
			
		||||
	scanner := bufio.NewScanner(resp.Body)
 | 
			
		||||
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 | 
			
		||||
		if atEOF && len(data) == 0 {
 | 
			
		||||
@@ -217,7 +225,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 | 
			
		||||
	return nil, &usage
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	var aliResponse ChatResponse
 | 
			
		||||
	responseBody, err := io.ReadAll(resp.Body)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
@@ -232,8 +240,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
 | 
			
		||||
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 | 
			
		||||
	}
 | 
			
		||||
	if aliResponse.Code != "" {
 | 
			
		||||
		return &openai.ErrorWithStatusCode{
 | 
			
		||||
			Error: openai.Error{
 | 
			
		||||
		return &model.ErrorWithStatusCode{
 | 
			
		||||
			Error: model.Error{
 | 
			
		||||
				Message: aliResponse.Message,
 | 
			
		||||
				Type:    aliResponse.Code,
 | 
			
		||||
				Param:   aliResponse.RequestId,
 | 
			
		||||
 
 | 
			
		||||
@@ -16,6 +16,8 @@ type Parameters struct {
	Seed              uint64  `json:"seed,omitempty"`
	EnableSearch      bool    `json:"enable_search,omitempty"`
	IncrementalOutput bool    `json:"incremental_output,omitempty"`
	MaxTokens         int     `json:"max_tokens,omitempty"`
	Temperature       float64 `json:"temperature,omitempty"`
}

type ChatRequest struct {
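One behavioural detail worth noting from the Ali changes: ConvertRequest now pulls top_p back below 1 before filling Parameters, presumably because DashScope does not accept top_p values of 1 or more. A reduced sketch of that guard, mirroring the code in the diff:

package example

// clampTopP mirrors the check added in the Ali ConvertRequest: values of
// 1 or above are pulled back to 0.9999 before being sent to DashScope.
func clampTopP(topP float64) float64 {
	if topP >= 1 {
		return 0.9999
	}
	return topP
}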
@@ -1,22 +1,63 @@
 | 
			
		||||
package anthropic
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/util"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type Adaptor struct {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) Auth(c *gin.Context) error {
 | 
			
		||||
func (a *Adaptor) Init(meta *util.RelayMeta) {
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
 | 
			
		||||
	return fmt.Sprintf("%s/v1/messages", meta.BaseURL), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
 | 
			
		||||
	channel.SetupCommonRequestHeader(c, req, meta)
 | 
			
		||||
	req.Header.Set("x-api-key", meta.APIKey)
 | 
			
		||||
	anthropicVersion := c.Request.Header.Get("anthropic-version")
 | 
			
		||||
	if anthropicVersion == "" {
 | 
			
		||||
		anthropicVersion = "2023-06-01"
 | 
			
		||||
	}
 | 
			
		||||
	req.Header.Set("anthropic-version", anthropicVersion)
 | 
			
		||||
	req.Header.Set("anthropic-beta", "messages-2023-12-15")
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	return nil, nil
 | 
			
		||||
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	if request == nil {
 | 
			
		||||
		return nil, errors.New("request is nil")
 | 
			
		||||
	}
 | 
			
		||||
	return ConvertRequest(*request), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
 | 
			
		||||
	return nil, nil, nil
 | 
			
		||||
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
 | 
			
		||||
	return channel.DoRequestHelper(a, c, meta, requestBody)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		err, usage = StreamHandler(c, resp)
 | 
			
		||||
	} else {
 | 
			
		||||
		err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
 | 
			
		||||
	}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetModelList() []string {
 | 
			
		||||
	return ModelList
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetChannelName() string {
 | 
			
		||||
	return "anthropic"
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
8  relay/channel/anthropic/constants.go  (Normal file)
@@ -0,0 +1,8 @@
package anthropic

var ModelList = []string{
	"claude-instant-1.2", "claude-2.0", "claude-2.1",
	"claude-3-haiku-20240307",
	"claude-3-sonnet-20240229",
	"claude-3-opus-20240229",
}
@@ -5,82 +5,146 @@ import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/helper"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/image"
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/logger"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel/openai"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/common"
 | 
			
		||||
	"one-api/common/helper"
 | 
			
		||||
	"one-api/common/logger"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
	"strings"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func stopReasonClaude2OpenAI(reason string) string {
 | 
			
		||||
	switch reason {
 | 
			
		||||
func stopReasonClaude2OpenAI(reason *string) string {
 | 
			
		||||
	if reason == nil {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
	switch *reason {
 | 
			
		||||
	case "end_turn":
 | 
			
		||||
		return "stop"
 | 
			
		||||
	case "stop_sequence":
 | 
			
		||||
		return "stop"
 | 
			
		||||
	case "max_tokens":
 | 
			
		||||
		return "length"
 | 
			
		||||
	default:
 | 
			
		||||
		return reason
 | 
			
		||||
		return *reason
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func ConvertRequest(textRequest openai.GeneralOpenAIRequest) *Request {
 | 
			
		||||
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 | 
			
		||||
	claudeRequest := Request{
 | 
			
		||||
		Model:             textRequest.Model,
 | 
			
		||||
		Prompt:            "",
 | 
			
		||||
		MaxTokensToSample: textRequest.MaxTokens,
 | 
			
		||||
		StopSequences:     nil,
 | 
			
		||||
		Temperature:       textRequest.Temperature,
 | 
			
		||||
		TopP:              textRequest.TopP,
 | 
			
		||||
		Stream:            textRequest.Stream,
 | 
			
		||||
		Model:       textRequest.Model,
 | 
			
		||||
		MaxTokens:   textRequest.MaxTokens,
 | 
			
		||||
		Temperature: textRequest.Temperature,
 | 
			
		||||
		TopP:        textRequest.TopP,
 | 
			
		||||
		Stream:      textRequest.Stream,
 | 
			
		||||
	}
 | 
			
		||||
	if claudeRequest.MaxTokensToSample == 0 {
 | 
			
		||||
		claudeRequest.MaxTokensToSample = 1000000
 | 
			
		||||
	if claudeRequest.MaxTokens == 0 {
 | 
			
		||||
		claudeRequest.MaxTokens = 4096
 | 
			
		||||
	}
 | 
			
		||||
	// legacy model name mapping
 | 
			
		||||
	if claudeRequest.Model == "claude-instant-1" {
 | 
			
		||||
		claudeRequest.Model = "claude-instant-1.1"
 | 
			
		||||
	} else if claudeRequest.Model == "claude-2" {
 | 
			
		||||
		claudeRequest.Model = "claude-2.1"
 | 
			
		||||
	}
 | 
			
		||||
	prompt := ""
 | 
			
		||||
	for _, message := range textRequest.Messages {
 | 
			
		||||
		if message.Role == "user" {
 | 
			
		||||
			prompt += fmt.Sprintf("\n\nHuman: %s", message.Content)
 | 
			
		||||
		} else if message.Role == "assistant" {
 | 
			
		||||
			prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
 | 
			
		||||
		} else if message.Role == "system" {
 | 
			
		||||
			if prompt == "" {
 | 
			
		||||
				prompt = message.StringContent()
 | 
			
		||||
			}
 | 
			
		||||
		if message.Role == "system" && claudeRequest.System == "" {
 | 
			
		||||
			claudeRequest.System = message.StringContent()
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		claudeMessage := Message{
 | 
			
		||||
			Role: message.Role,
 | 
			
		||||
		}
 | 
			
		||||
		var content Content
 | 
			
		||||
		if message.IsStringContent() {
 | 
			
		||||
			content.Type = "text"
 | 
			
		||||
			content.Text = message.StringContent()
 | 
			
		||||
			claudeMessage.Content = append(claudeMessage.Content, content)
 | 
			
		||||
			claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		var contents []Content
 | 
			
		||||
		openaiContent := message.ParseContent()
 | 
			
		||||
		for _, part := range openaiContent {
 | 
			
		||||
			var content Content
 | 
			
		||||
			if part.Type == model.ContentTypeText {
 | 
			
		||||
				content.Type = "text"
 | 
			
		||||
				content.Text = part.Text
 | 
			
		||||
			} else if part.Type == model.ContentTypeImageURL {
 | 
			
		||||
				content.Type = "image"
 | 
			
		||||
				content.Source = &ImageSource{
 | 
			
		||||
					Type: "base64",
 | 
			
		||||
				}
 | 
			
		||||
				mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
 | 
			
		||||
				content.Source.MediaType = mimeType
 | 
			
		||||
				content.Source.Data = data
 | 
			
		||||
			}
 | 
			
		||||
			contents = append(contents, content)
 | 
			
		||||
		}
 | 
			
		||||
		claudeMessage.Content = contents
 | 
			
		||||
		claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
 | 
			
		||||
	}
 | 
			
		||||
	prompt += "\n\nAssistant:"
 | 
			
		||||
	claudeRequest.Prompt = prompt
 | 
			
		||||
	return &claudeRequest
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func streamResponseClaude2OpenAI(claudeResponse *Response) *openai.ChatCompletionsStreamResponse {
 | 
			
		||||
// https://docs.anthropic.com/claude/reference/messages-streaming
 | 
			
		||||
func streamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *Response) {
 | 
			
		||||
	var response *Response
 | 
			
		||||
	var responseText string
 | 
			
		||||
	var stopReason string
 | 
			
		||||
	switch claudeResponse.Type {
 | 
			
		||||
	case "message_start":
 | 
			
		||||
		return nil, claudeResponse.Message
 | 
			
		||||
	case "content_block_start":
 | 
			
		||||
		if claudeResponse.ContentBlock != nil {
 | 
			
		||||
			responseText = claudeResponse.ContentBlock.Text
 | 
			
		||||
		}
 | 
			
		||||
	case "content_block_delta":
 | 
			
		||||
		if claudeResponse.Delta != nil {
 | 
			
		||||
			responseText = claudeResponse.Delta.Text
 | 
			
		||||
		}
 | 
			
		||||
	case "message_delta":
 | 
			
		||||
		if claudeResponse.Usage != nil {
 | 
			
		||||
			response = &Response{
 | 
			
		||||
				Usage: *claudeResponse.Usage,
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if claudeResponse.Delta != nil && claudeResponse.Delta.StopReason != nil {
 | 
			
		||||
			stopReason = *claudeResponse.Delta.StopReason
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	var choice openai.ChatCompletionsStreamResponseChoice
 | 
			
		||||
	choice.Delta.Content = claudeResponse.Completion
 | 
			
		||||
	finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason)
 | 
			
		||||
	choice.Delta.Content = responseText
 | 
			
		||||
	choice.Delta.Role = "assistant"
 | 
			
		||||
	finishReason := stopReasonClaude2OpenAI(&stopReason)
 | 
			
		||||
	if finishReason != "null" {
 | 
			
		||||
		choice.FinishReason = &finishReason
 | 
			
		||||
	}
 | 
			
		||||
	var response openai.ChatCompletionsStreamResponse
 | 
			
		||||
	response.Object = "chat.completion.chunk"
 | 
			
		||||
	response.Model = claudeResponse.Model
 | 
			
		||||
	response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
 | 
			
		||||
	return &response
 | 
			
		||||
	var openaiResponse openai.ChatCompletionsStreamResponse
 | 
			
		||||
	openaiResponse.Object = "chat.completion.chunk"
 | 
			
		||||
	openaiResponse.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
 | 
			
		||||
	return &openaiResponse, response
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 | 
			
		||||
	var responseText string
 | 
			
		||||
	if len(claudeResponse.Content) > 0 {
 | 
			
		||||
		responseText = claudeResponse.Content[0].Text
 | 
			
		||||
	}
 | 
			
		||||
	choice := openai.TextResponseChoice{
 | 
			
		||||
		Index: 0,
 | 
			
		||||
		Message: openai.Message{
 | 
			
		||||
		Message: model.Message{
 | 
			
		||||
			Role:    "assistant",
 | 
			
		||||
			Content: strings.TrimPrefix(claudeResponse.Completion, " "),
 | 
			
		||||
			Content: responseText,
 | 
			
		||||
			Name:    nil,
 | 
			
		||||
		},
 | 
			
		||||
		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
 | 
			
		||||
	}
 | 
			
		||||
	fullTextResponse := openai.TextResponse{
 | 
			
		||||
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 | 
			
		||||
		Id:      fmt.Sprintf("chatcmpl-%s", claudeResponse.Id),
 | 
			
		||||
		Model:   claudeResponse.Model,
 | 
			
		||||
		Object:  "chat.completion",
 | 
			
		||||
		Created: helper.GetTimestamp(),
 | 
			
		||||
		Choices: []openai.TextResponseChoice{choice},
 | 
			
		||||
@@ -88,17 +152,15 @@ func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 | 
			
		||||
	return &fullTextResponse
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
 | 
			
		||||
	responseText := ""
 | 
			
		||||
	responseId := fmt.Sprintf("chatcmpl-%s", helper.GetUUID())
 | 
			
		||||
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	createdTime := helper.GetTimestamp()
 | 
			
		||||
	scanner := bufio.NewScanner(resp.Body)
 | 
			
		||||
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 | 
			
		||||
		if atEOF && len(data) == 0 {
 | 
			
		||||
			return 0, nil, nil
 | 
			
		||||
		}
 | 
			
		||||
		if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 {
 | 
			
		||||
			return i + 4, data[0:i], nil
 | 
			
		||||
		if i := strings.Index(string(data), "\n"); i >= 0 {
 | 
			
		||||
			return i + 1, data[0:i], nil
 | 
			
		||||
		}
 | 
			
		||||
		if atEOF {
 | 
			
		||||
			return len(data), data, nil
 | 
			
		||||
@@ -110,29 +172,45 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 | 
			
		||||
	go func() {
 | 
			
		||||
		for scanner.Scan() {
 | 
			
		||||
			data := scanner.Text()
 | 
			
		||||
			if !strings.HasPrefix(data, "event: completion") {
 | 
			
		||||
			if len(data) < 6 {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
			data = strings.TrimPrefix(data, "event: completion\r\ndata: ")
 | 
			
		||||
			if !strings.HasPrefix(data, "data: ") {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
			data = strings.TrimPrefix(data, "data: ")
 | 
			
		||||
			dataChan <- data
 | 
			
		||||
		}
 | 
			
		||||
		stopChan <- true
 | 
			
		||||
	}()
 | 
			
		||||
	common.SetEventStreamHeaders(c)
 | 
			
		||||
	var usage model.Usage
 | 
			
		||||
	var modelName string
 | 
			
		||||
	var id string
 | 
			
		||||
	c.Stream(func(w io.Writer) bool {
 | 
			
		||||
		select {
 | 
			
		||||
		case data := <-dataChan:
 | 
			
		||||
			// some implementations may add \r at the end of data
 | 
			
		||||
			data = strings.TrimSuffix(data, "\r")
 | 
			
		||||
			var claudeResponse Response
 | 
			
		||||
			var claudeResponse StreamResponse
 | 
			
		||||
			err := json.Unmarshal([]byte(data), &claudeResponse)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				logger.SysError("error unmarshalling stream response: " + err.Error())
 | 
			
		||||
				return true
 | 
			
		||||
			}
 | 
			
		||||
			responseText += claudeResponse.Completion
 | 
			
		||||
			response := streamResponseClaude2OpenAI(&claudeResponse)
 | 
			
		||||
			response.Id = responseId
 | 
			
		||||
			response, meta := streamResponseClaude2OpenAI(&claudeResponse)
 | 
			
		||||
			if meta != nil {
 | 
			
		||||
				usage.PromptTokens += meta.Usage.InputTokens
 | 
			
		||||
				usage.CompletionTokens += meta.Usage.OutputTokens
 | 
			
		||||
				modelName = meta.Model
 | 
			
		||||
				id = fmt.Sprintf("chatcmpl-%s", meta.Id)
 | 
			
		||||
				return true
 | 
			
		||||
			}
 | 
			
		||||
			if response == nil {
 | 
			
		||||
				return true
 | 
			
		||||
			}
 | 
			
		||||
			response.Id = id
 | 
			
		||||
			response.Model = modelName
 | 
			
		||||
			response.Created = createdTime
 | 
			
		||||
			jsonStr, err := json.Marshal(response)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
@@ -146,14 +224,11 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 | 
			
		||||
			return false
 | 
			
		||||
		}
 | 
			
		||||
	})
 | 
			
		||||
	err := resp.Body.Close()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
 | 
			
		||||
	}
 | 
			
		||||
	return nil, responseText
 | 
			
		||||
	_ = resp.Body.Close()
 | 
			
		||||
	return nil, &usage
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
 | 
			
		||||
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
 | 
			
		||||
	responseBody, err := io.ReadAll(resp.Body)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 | 
			
		||||
@@ -168,8 +243,8 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
 | 
			
		||||
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 | 
			
		||||
	}
 | 
			
		||||
	if claudeResponse.Error.Type != "" {
 | 
			
		||||
		return &openai.ErrorWithStatusCode{
 | 
			
		||||
			Error: openai.Error{
 | 
			
		||||
		return &model.ErrorWithStatusCode{
 | 
			
		||||
			Error: model.Error{
 | 
			
		||||
				Message: claudeResponse.Error.Message,
 | 
			
		||||
				Type:    claudeResponse.Error.Type,
 | 
			
		||||
				Param:   "",
 | 
			
		||||
@@ -179,12 +254,11 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
 | 
			
		||||
		}, nil
 | 
			
		||||
	}
 | 
			
		||||
	fullTextResponse := responseClaude2OpenAI(&claudeResponse)
 | 
			
		||||
	fullTextResponse.Model = model
 | 
			
		||||
	completionTokens := openai.CountTokenText(claudeResponse.Completion, model)
 | 
			
		||||
	usage := openai.Usage{
 | 
			
		||||
		PromptTokens:     promptTokens,
 | 
			
		||||
		CompletionTokens: completionTokens,
 | 
			
		||||
		TotalTokens:      promptTokens + completionTokens,
 | 
			
		||||
	fullTextResponse.Model = modelName
 | 
			
		||||
	usage := model.Usage{
 | 
			
		||||
		PromptTokens:     claudeResponse.Usage.InputTokens,
 | 
			
		||||
		CompletionTokens: claudeResponse.Usage.OutputTokens,
 | 
			
		||||
		TotalTokens:      claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens,
 | 
			
		||||
	}
 | 
			
		||||
	fullTextResponse.Usage = usage
 | 
			
		||||
	jsonResponse, err := json.Marshal(fullTextResponse)
 | 
			
		||||
 
 | 
			
		||||
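The net effect of the Anthropic rewrite above is that ConvertRequest now emits a Messages API body instead of a legacy completion prompt: the system message is hoisted into the top-level System field, every other message becomes a list of typed content blocks, and MaxTokens falls back to 4096 when unset. A hedged sketch of building such a request with the structs from this diff; the literal values are examples only, and the import path assumes the usual module layout.

package example

import "github.com/songquanpeng/one-api/relay/channel/anthropic"

// buildClaudeRequest shows the shape ConvertRequest produces for a simple
// text-only conversation; the model, system text, and message are made up.
func buildClaudeRequest() *anthropic.Request {
	return &anthropic.Request{
		Model:     "claude-3-sonnet-20240229",
		System:    "You are a helpful assistant.",
		MaxTokens: 4096,
		Messages: []anthropic.Message{
			{
				Role:    "user",
				Content: []anthropic.Content{{Type: "text", Text: "Hello"}},
			},
		},
	}
}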
@@ -1,19 +1,44 @@
 | 
			
		||||
package anthropic
 | 
			
		||||
 | 
			
		||||
// https://docs.anthropic.com/claude/reference/messages_post
 | 
			
		||||
 | 
			
		||||
type Metadata struct {
 | 
			
		||||
	UserId string `json:"user_id"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type ImageSource struct {
 | 
			
		||||
	Type      string `json:"type"`
 | 
			
		||||
	MediaType string `json:"media_type"`
 | 
			
		||||
	Data      string `json:"data"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Content struct {
 | 
			
		||||
	Type   string       `json:"type"`
 | 
			
		||||
	Text   string       `json:"text,omitempty"`
 | 
			
		||||
	Source *ImageSource `json:"source,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Message struct {
 | 
			
		||||
	Role    string    `json:"role"`
 | 
			
		||||
	Content []Content `json:"content"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Request struct {
 | 
			
		||||
	Model             string   `json:"model"`
 | 
			
		||||
	Prompt            string   `json:"prompt"`
 | 
			
		||||
	MaxTokensToSample int      `json:"max_tokens_to_sample"`
 | 
			
		||||
	StopSequences     []string `json:"stop_sequences,omitempty"`
 | 
			
		||||
	Temperature       float64  `json:"temperature,omitempty"`
 | 
			
		||||
	TopP              float64  `json:"top_p,omitempty"`
 | 
			
		||||
	TopK              int      `json:"top_k,omitempty"`
 | 
			
		||||
	Model         string    `json:"model"`
 | 
			
		||||
	Messages      []Message `json:"messages"`
 | 
			
		||||
	System        string    `json:"system,omitempty"`
 | 
			
		||||
	MaxTokens     int       `json:"max_tokens,omitempty"`
 | 
			
		||||
	StopSequences []string  `json:"stop_sequences,omitempty"`
 | 
			
		||||
	Stream        bool      `json:"stream,omitempty"`
 | 
			
		||||
	Temperature   float64   `json:"temperature,omitempty"`
 | 
			
		||||
	TopP          float64   `json:"top_p,omitempty"`
 | 
			
		||||
	TopK          int       `json:"top_k,omitempty"`
 | 
			
		||||
	//Metadata    `json:"metadata,omitempty"`
 | 
			
		||||
	Stream bool `json:"stream,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Usage struct {
 | 
			
		||||
	InputTokens  int `json:"input_tokens"`
 | 
			
		||||
	OutputTokens int `json:"output_tokens"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Error struct {
 | 
			
		||||
@@ -22,8 +47,29 @@ type Error struct {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Response struct {
 | 
			
		||||
	Completion string `json:"completion"`
 | 
			
		||||
	StopReason string `json:"stop_reason"`
 | 
			
		||||
	Model      string `json:"model"`
 | 
			
		||||
	Error      Error  `json:"error"`
 | 
			
		||||
	Id           string    `json:"id"`
 | 
			
		||||
	Type         string    `json:"type"`
 | 
			
		||||
	Role         string    `json:"role"`
 | 
			
		||||
	Content      []Content `json:"content"`
 | 
			
		||||
	Model        string    `json:"model"`
 | 
			
		||||
	StopReason   *string   `json:"stop_reason"`
 | 
			
		||||
	StopSequence *string   `json:"stop_sequence"`
 | 
			
		||||
	Usage        Usage     `json:"usage"`
 | 
			
		||||
	Error        Error     `json:"error"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type Delta struct {
 | 
			
		||||
	Type         string  `json:"type"`
 | 
			
		||||
	Text         string  `json:"text"`
 | 
			
		||||
	StopReason   *string `json:"stop_reason"`
 | 
			
		||||
	StopSequence *string `json:"stop_sequence"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type StreamResponse struct {
 | 
			
		||||
	Type         string    `json:"type"`
 | 
			
		||||
	Message      *Response `json:"message"`
 | 
			
		||||
	Index        int       `json:"index"`
 | 
			
		||||
	ContentBlock *Content  `json:"content_block"`
 | 
			
		||||
	Delta        *Delta    `json:"delta"`
 | 
			
		||||
	Usage        *Usage    `json:"usage"`
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
7  relay/channel/baichuan/constants.go  (Normal file)
@@ -0,0 +1,7 @@
package baichuan

var ModelList = []string{
	"Baichuan2-Turbo",
	"Baichuan2-Turbo-192k",
	"Baichuan-Text-Embedding",
}
@@ -1,22 +1,118 @@
 | 
			
		||||
package baidu
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"one-api/relay/channel/openai"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/gin-gonic/gin"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/channel"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/constant"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/util"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type Adaptor struct {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) Auth(c *gin.Context) error {
 | 
			
		||||
func (a *Adaptor) Init(meta *util.RelayMeta) {
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
 | 
			
		||||
	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/clntwmv7t
 | 
			
		||||
	suffix := "chat/"
 | 
			
		||||
	if strings.HasPrefix(meta.ActualModelName, "Embedding") {
 | 
			
		||||
		suffix = "embeddings/"
 | 
			
		||||
	}
 | 
			
		||||
	if strings.HasPrefix(meta.ActualModelName, "bge-large") {
 | 
			
		||||
		suffix = "embeddings/"
 | 
			
		||||
	}
 | 
			
		||||
	if strings.HasPrefix(meta.ActualModelName, "tao-8k") {
 | 
			
		||||
		suffix = "embeddings/"
 | 
			
		||||
	}
 | 
			
		||||
	switch meta.ActualModelName {
 | 
			
		||||
	case "ERNIE-4.0":
 | 
			
		||||
		suffix += "completions_pro"
 | 
			
		||||
	case "ERNIE-Bot-4":
 | 
			
		||||
		suffix += "completions_pro"
 | 
			
		||||
	case "ERNIE-3.5-8K":
 | 
			
		||||
		suffix += "completions"
 | 
			
		||||
	case "ERNIE-Bot-8K":
 | 
			
		||||
		suffix += "ernie_bot_8k"
 | 
			
		||||
	case "ERNIE-Bot":
 | 
			
		||||
		suffix += "completions"
 | 
			
		||||
	case "ERNIE-Speed":
 | 
			
		||||
		suffix += "ernie_speed"
 | 
			
		||||
	case "ERNIE-Bot-turbo":
 | 
			
		||||
		suffix += "eb-instant"
 | 
			
		||||
	case "BLOOMZ-7B":
 | 
			
		||||
		suffix += "bloomz_7b1"
 | 
			
		||||
	case "Embedding-V1":
 | 
			
		||||
		suffix += "embedding-v1"
 | 
			
		||||
	case "bge-large-zh":
 | 
			
		||||
		suffix += "bge_large_zh"
 | 
			
		||||
	case "bge-large-en":
 | 
			
		||||
		suffix += "bge_large_en"
 | 
			
		||||
	case "tao-8k":
 | 
			
		||||
		suffix += "tao_8k"
 | 
			
		||||
	default:
 | 
			
		||||
		suffix += meta.ActualModelName
 | 
			
		||||
	}
 | 
			
		||||
	fullRequestURL := fmt.Sprintf("%s/rpc/2.0/ai_custom/v1/wenxinworkshop/%s", meta.BaseURL, suffix)
 | 
			
		||||
	var accessToken string
 | 
			
		||||
	var err error
 | 
			
		||||
	if accessToken, err = GetAccessToken(meta.APIKey); err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
	fullRequestURL += "?access_token=" + accessToken
 | 
			
		||||
	return fullRequestURL, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
 | 
			
		||||
	channel.SetupCommonRequestHeader(c, req, meta)
 | 
			
		||||
	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	return nil, nil
 | 
			
		||||
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
 | 
			
		||||
	if request == nil {
 | 
			
		||||
		return nil, errors.New("request is nil")
 | 
			
		||||
	}
 | 
			
		||||
	switch relayMode {
 | 
			
		||||
	case constant.RelayModeEmbeddings:
 | 
			
		||||
		baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
 | 
			
		||||
		return baiduEmbeddingRequest, nil
 | 
			
		||||
	default:
 | 
			
		||||
		baiduRequest := ConvertRequest(*request)
 | 
			
		||||
		return baiduRequest, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
 | 
			
		||||
	return nil, nil, nil
 | 
			
		||||
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
 | 
			
		||||
	return channel.DoRequestHelper(a, c, meta, requestBody)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
 | 
			
		||||
	if meta.IsStream {
 | 
			
		||||
		err, usage = StreamHandler(c, resp)
 | 
			
		||||
	} else {
 | 
			
		||||
		switch meta.Mode {
 | 
			
		||||
		case constant.RelayModeEmbeddings:
 | 
			
		||||
			err, usage = EmbeddingHandler(c, resp)
 | 
			
		||||
		default:
 | 
			
		||||
			err, usage = Handler(c, resp)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetModelList() []string {
 | 
			
		||||
	return ModelList
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *Adaptor) GetChannelName() string {
 | 
			
		||||
	return "baidu"
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
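For orientation, a runnable sketch (not part of the diff; the base URL and token are placeholder values) of the URL that GetRequestURL assembles for an ERNIE-4.0 chat call:

package main

import "fmt"

func main() {
	// Placeholder values; in the adaptor they come from util.RelayMeta and GetAccessToken.
	baseURL := "https://aip.baidubce.com"
	suffix := "chat/" + "completions_pro" // the switch maps "ERNIE-4.0" to this endpoint
	accessToken := "TOKEN"
	fullRequestURL := fmt.Sprintf("%s/rpc/2.0/ai_custom/v1/wenxinworkshop/%s", baseURL, suffix)
	fullRequestURL += "?access_token=" + accessToken
	fmt.Println(fullRequestURL)
	// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro?access_token=TOKEN
}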
							
								
								
									
relay/channel/baidu/constants.go (new file)
@@ -0,0 +1,13 @@
package baidu

var ModelList = []string{
	"ERNIE-Bot-4",
	"ERNIE-Bot-8K",
	"ERNIE-Bot",
	"ERNIE-Speed",
	"ERNIE-Bot-turbo",
	"Embedding-V1",
	"bge-large-zh",
	"bge-large-en",
	"tao-8k",
}
@@ -6,13 +6,14 @@ import (
	"errors"
	"fmt"
	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
-	"one-api/common"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
-	"one-api/relay/util"
	"strings"
	"sync"
	"time"
@@ -31,9 +32,16 @@ type Message struct {
}

type ChatRequest struct {
-	Messages []Message `json:"messages"`
-	Stream   bool      `json:"stream"`
-	UserId   string    `json:"user_id,omitempty"`
+	Messages        []Message `json:"messages"`
+	Temperature     float64   `json:"temperature,omitempty"`
+	TopP            float64   `json:"top_p,omitempty"`
+	PenaltyScore    float64   `json:"penalty_score,omitempty"`
+	Stream          bool      `json:"stream,omitempty"`
+	System          string    `json:"system,omitempty"`
+	DisableSearch   bool      `json:"disable_search,omitempty"`
+	EnableCitation  bool      `json:"enable_citation,omitempty"`
+	MaxOutputTokens int       `json:"max_output_tokens,omitempty"`
+	UserId          string    `json:"user_id,omitempty"`
}

type Error struct {
@@ -43,35 +51,35 @@ type Error struct {

var baiduTokenStore sync.Map

-func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
-	messages := make([]Message, 0, len(request.Messages))
+func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
+	baiduRequest := ChatRequest{
+		Messages:        make([]Message, 0, len(request.Messages)),
+		Temperature:     request.Temperature,
+		TopP:            request.TopP,
+		PenaltyScore:    request.FrequencyPenalty,
+		Stream:          request.Stream,
+		DisableSearch:   false,
+		EnableCitation:  false,
+		MaxOutputTokens: request.MaxTokens,
+		UserId:          request.User,
+	}
	for _, message := range request.Messages {
		if message.Role == "system" {
-			messages = append(messages, Message{
-				Role:    "user",
-				Content: message.StringContent(),
-			})
-			messages = append(messages, Message{
-				Role:    "assistant",
-				Content: "Okay",
-			})
+			baiduRequest.System = message.StringContent()
		} else {
-			messages = append(messages, Message{
+			baiduRequest.Messages = append(baiduRequest.Messages, Message{
				Role:    message.Role,
				Content: message.StringContent(),
			})
		}
	}
-	return &ChatRequest{
-		Messages: messages,
-		Stream:   request.Stream,
-	}
+	return &baiduRequest
}

func responseBaidu2OpenAI(response *ChatResponse) *openai.TextResponse {
	choice := openai.TextResponseChoice{
		Index: 0,
-		Message: openai.Message{
+		Message: model.Message{
			Role:    "assistant",
			Content: response.Result,
		},
@@ -103,7 +111,7 @@ func streamResponseBaidu2OpenAI(baiduResponse *ChatStreamResponse) *openai.ChatC
	return &response
}

-func ConvertEmbeddingRequest(request openai.GeneralOpenAIRequest) *EmbeddingRequest {
+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
	return &EmbeddingRequest{
		Input: request.ParseInput(),
	}
@@ -126,8 +134,8 @@ func embeddingResponseBaidu2OpenAI(response *EmbeddingResponse) *openai.Embeddin
	return &openAIEmbeddingResponse
}

-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
-	var usage openai.Usage
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	var usage model.Usage
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
@@ -189,7 +197,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
	return nil, &usage
}

-func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	var baiduResponse ChatResponse
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
@@ -204,8 +212,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if baiduResponse.ErrorMsg != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
				Message: baiduResponse.ErrorMsg,
				Type:    "baidu_error",
				Param:   "",
@@ -226,7 +234,7 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
	return nil, &fullTextResponse.Usage
}

-func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	var baiduResponse EmbeddingResponse
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
@@ -241,8 +249,8 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSta
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if baiduResponse.ErrorMsg != "" {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
				Message: baiduResponse.ErrorMsg,
				Type:    "baidu_error",
				Param:   "",
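A self-contained sketch (simplified local types, not the diff's own) of the system-message handling that the new ConvertRequest introduces: the system prompt now lands in the dedicated `system` field instead of being replayed as a fake user/assistant exchange:

package main

import "fmt"

type Message struct {
	Role    string
	Content string
}

// Simplified stand-ins for model.GeneralOpenAIRequest / baidu.ChatRequest.
type openAIRequest struct{ Messages []Message }

type baiduChatRequest struct {
	System   string
	Messages []Message
}

func convert(req openAIRequest) baiduChatRequest {
	out := baiduChatRequest{Messages: make([]Message, 0, len(req.Messages))}
	for _, m := range req.Messages {
		if m.Role == "system" {
			// New behavior from this diff: the system prompt moves to the
			// dedicated field instead of a synthesized user/assistant pair.
			out.System = m.Content
		} else {
			out.Messages = append(out.Messages, m)
		}
	}
	return out
}

func main() {
	r := convert(openAIRequest{Messages: []Message{
		{Role: "system", Content: "You are terse."},
		{Role: "user", Content: "Hi"},
	}})
	fmt.Printf("system=%q messages=%d\n", r.System, len(r.Messages))
	// system="You are terse." messages=1
}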
@@ -1,18 +1,18 @@
package baidu

import (
-	"one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/model"
	"time"
)

type ChatResponse struct {
-	Id               string       `json:"id"`
-	Object           string       `json:"object"`
-	Created          int64        `json:"created"`
-	Result           string       `json:"result"`
-	IsTruncated      bool         `json:"is_truncated"`
-	NeedClearHistory bool         `json:"need_clear_history"`
-	Usage            openai.Usage `json:"usage"`
+	Id               string      `json:"id"`
+	Object           string      `json:"object"`
+	Created          int64       `json:"created"`
+	Result           string      `json:"result"`
+	IsTruncated      bool        `json:"is_truncated"`
+	NeedClearHistory bool        `json:"need_clear_history"`
+	Usage            model.Usage `json:"usage"`
	Error
}

@@ -37,7 +37,7 @@ type EmbeddingResponse struct {
	Object  string          `json:"object"`
	Created int64           `json:"created"`
	Data    []EmbeddingData `json:"data"`
-	Usage   openai.Usage    `json:"usage"`
+	Usage   model.Usage     `json:"usage"`
	Error
}
relay/channel/common.go (new file)
@@ -0,0 +1,51 @@
package channel

import (
	"errors"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
)

func SetupCommonRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) {
	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
	if meta.IsStream && c.Request.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "text/event-stream")
	}
}

func DoRequestHelper(a Adaptor, c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
	fullRequestURL, err := a.GetRequestURL(meta)
	if err != nil {
		return nil, fmt.Errorf("get request url failed: %w", err)
	}
	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
	if err != nil {
		return nil, fmt.Errorf("new request failed: %w", err)
	}
	err = a.SetupRequestHeader(c, req, meta)
	if err != nil {
		return nil, fmt.Errorf("setup request header failed: %w", err)
	}
	resp, err := DoRequest(c, req)
	if err != nil {
		return nil, fmt.Errorf("do request failed: %w", err)
	}
	return resp, nil
}

func DoRequest(c *gin.Context, req *http.Request) (*http.Response, error) {
	resp, err := util.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("resp is nil")
	}
	_ = req.Body.Close()
	_ = c.Request.Body.Close()
	return resp, nil
}
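A hypothetical driver (not present in this diff) showing the call order these helpers assume together with the Adaptor interface: Init, then DoRequest, which typically delegates to DoRequestHelper, then DoResponse:

package channel

import (
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

// relayOnce is an illustrative sketch of how a controller is expected to
// drive an Adaptor end to end; real controllers also convert the request
// body via a.ConvertRequest before sending it.
func relayOnce(a Adaptor, c *gin.Context, meta *util.RelayMeta, body io.Reader) (*model.Usage, *model.ErrorWithStatusCode) {
	a.Init(meta)
	resp, err := a.DoRequest(c, meta, body)
	if err != nil {
		return nil, &model.ErrorWithStatusCode{
			StatusCode: http.StatusInternalServerError,
			Error:      model.Error{Message: err.Error(), Type: "one_api_error"},
		}
	}
	return a.DoResponse(c, resp, meta)
}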
							
								
								
									
relay/channel/gemini/adaptor.go (new file)
@@ -0,0 +1,66 @@
package gemini

import (
	"errors"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common/helper"
	channelhelper "github.com/songquanpeng/one-api/relay/channel"
	"github.com/songquanpeng/one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
)

type Adaptor struct {
}

func (a *Adaptor) Init(meta *util.RelayMeta) {

}

func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
	version := helper.AssignOrDefault(meta.APIVersion, "v1")
	action := "generateContent"
	if meta.IsStream {
		action = "streamGenerateContent"
	}
	return fmt.Sprintf("%s/%s/models/%s:%s", meta.BaseURL, version, meta.ActualModelName, action), nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
	channelhelper.SetupCommonRequestHeader(c, req, meta)
	req.Header.Set("x-goog-api-key", meta.APIKey)
	return nil
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	return ConvertRequest(*request), nil
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
	return channelhelper.DoRequestHelper(a, c, meta, requestBody)
}

func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		var responseText string
		err, responseText = StreamHandler(c, resp)
		usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
	} else {
		err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
	}
	return
}

func (a *Adaptor) GetModelList() []string {
	return ModelList
}

func (a *Adaptor) GetChannelName() string {
	return "google gemini"
}
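For reference, a runnable sketch (placeholder base URL and model; the real values come from util.RelayMeta) of the endpoint this GetRequestURL builds:

package main

import "fmt"

func main() {
	// Placeholder values mirroring gemini's GetRequestURL; the base URL
	// depends on channel configuration.
	baseURL := "https://generativelanguage.googleapis.com"
	version := "v1"                   // helper.AssignOrDefault(meta.APIVersion, "v1")
	action := "streamGenerateContent" // chosen when meta.IsStream is true
	fmt.Printf("%s/%s/models/%s:%s\n", baseURL, version, "gemini-pro", action)
	// https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent
}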
							
								
								
									
relay/channel/gemini/constants.go (new file)
@@ -0,0 +1,6 @@
package gemini

var ModelList = []string{
	"gemini-pro", "gemini-1.0-pro-001",
	"gemini-pro-vision", "gemini-1.0-pro-vision-001",
}
@@ -1,18 +1,19 @@
-package google
+package gemini

import (
	"bufio"
	"encoding/json"
	"fmt"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/common/image"
+	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/model"
	"io"
	"net/http"
-	"one-api/common"
-	"one-api/common/config"
-	"one-api/common/helper"
-	"one-api/common/image"
-	"one-api/common/logger"
-	"one-api/relay/channel/openai"
-	"one-api/relay/constant"
	"strings"

	"github.com/gin-gonic/gin"
@@ -21,14 +22,14 @@ import (
// https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn

const (
-	GeminiVisionMaxImageNum = 16
+	VisionMaxImageNum = 16
)

// Setting safety to the lowest possible values since Gemini is already powerless enough
-func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRequest {
-	geminiRequest := GeminiChatRequest{
-		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
-		SafetySettings: []GeminiChatSafetySettings{
+func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
+	geminiRequest := ChatRequest{
+		Contents: make([]ChatContent, 0, len(textRequest.Messages)),
+		SafetySettings: []ChatSafetySettings{
			{
				Category:  "HARM_CATEGORY_HARASSMENT",
				Threshold: config.GeminiSafetySetting,
@@ -46,14 +47,14 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
				Threshold: config.GeminiSafetySetting,
			},
		},
-		GenerationConfig: GeminiChatGenerationConfig{
+		GenerationConfig: ChatGenerationConfig{
			Temperature:     textRequest.Temperature,
			TopP:            textRequest.TopP,
			MaxOutputTokens: textRequest.MaxTokens,
		},
	}
	if textRequest.Functions != nil {
-		geminiRequest.Tools = []GeminiChatTools{
+		geminiRequest.Tools = []ChatTools{
			{
				FunctionDeclarations: textRequest.Functions,
			},
@@ -61,30 +62,30 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
	}
	shouldAddDummyModelMessage := false
	for _, message := range textRequest.Messages {
-		content := GeminiChatContent{
+		content := ChatContent{
			Role: message.Role,
-			Parts: []GeminiPart{
+			Parts: []Part{
				{
					Text: message.StringContent(),
				},
			},
		}
		openaiContent := message.ParseContent()
-		var parts []GeminiPart
+		var parts []Part
		imageNum := 0
		for _, part := range openaiContent {
-			if part.Type == openai.ContentTypeText {
-				parts = append(parts, GeminiPart{
+			if part.Type == model.ContentTypeText {
+				parts = append(parts, Part{
					Text: part.Text,
				})
-			} else if part.Type == openai.ContentTypeImageURL {
+			} else if part.Type == model.ContentTypeImageURL {
				imageNum += 1
-				if imageNum > GeminiVisionMaxImageNum {
+				if imageNum > VisionMaxImageNum {
					continue
				}
				mimeType, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
-				parts = append(parts, GeminiPart{
-					InlineData: &GeminiInlineData{
+				parts = append(parts, Part{
+					InlineData: &InlineData{
						MimeType: mimeType,
						Data:     data,
					},
@@ -106,9 +107,9 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe

		// If a system message is the last message, we need to add a dummy model message to make gemini happy
		if shouldAddDummyModelMessage {
-			geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
+			geminiRequest.Contents = append(geminiRequest.Contents, ChatContent{
				Role: "model",
-				Parts: []GeminiPart{
+				Parts: []Part{
					{
						Text: "Okay",
					},
@@ -121,12 +122,12 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
	return &geminiRequest
}

-type GeminiChatResponse struct {
-	Candidates     []GeminiChatCandidate    `json:"candidates"`
-	PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
+type ChatResponse struct {
+	Candidates     []ChatCandidate    `json:"candidates"`
+	PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
}

-func (g *GeminiChatResponse) GetResponseText() string {
+func (g *ChatResponse) GetResponseText() string {
	if g == nil {
		return ""
	}
@@ -136,23 +137,23 @@ func (g *GeminiChatResponse) GetResponseText() string {
	return ""
}

-type GeminiChatCandidate struct {
-	Content       GeminiChatContent        `json:"content"`
-	FinishReason  string                   `json:"finishReason"`
-	Index         int64                    `json:"index"`
-	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
+type ChatCandidate struct {
+	Content       ChatContent        `json:"content"`
+	FinishReason  string             `json:"finishReason"`
+	Index         int64              `json:"index"`
+	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}

-type GeminiChatSafetyRating struct {
+type ChatSafetyRating struct {
	Category    string `json:"category"`
	Probability string `json:"probability"`
}

-type GeminiChatPromptFeedback struct {
-	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
+type ChatPromptFeedback struct {
+	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}

-func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextResponse {
+func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
	fullTextResponse := openai.TextResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
		Object:  "chat.completion",
@@ -162,7 +163,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
	for i, candidate := range response.Candidates {
		choice := openai.TextResponseChoice{
			Index: i,
-			Message: openai.Message{
+			Message: model.Message{
				Role:    "assistant",
				Content: "",
			},
@@ -176,7 +177,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
	return &fullTextResponse
}

-func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai.ChatCompletionsStreamResponse {
+func streamResponseGeminiChat2OpenAI(geminiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Content = geminiResponse.GetResponseText()
	choice.FinishReason = &constant.StopFinishReason
@@ -187,7 +188,7 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai
	return &response
}

-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
+func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
	responseText := ""
	dataChan := make(chan string)
	stopChan := make(chan bool)
@@ -257,7 +258,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
	return nil, responseText
}

-func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -266,14 +267,14 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
	if err != nil {
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
-	var geminiResponse GeminiChatResponse
+	var geminiResponse ChatResponse
	err = json.Unmarshal(responseBody, &geminiResponse)
	if err != nil {
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if len(geminiResponse.Candidates) == 0 {
-		return &openai.ErrorWithStatusCode{
-			Error: openai.Error{
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
				Message: "No candidates returned",
				Type:    "server_error",
				Param:   "",
@@ -283,9 +284,9 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
		}, nil
	}
	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
-	fullTextResponse.Model = model
-	completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), model)
-	usage := openai.Usage{
+	fullTextResponse.Model = modelName
+	completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), modelName)
+	usage := model.Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: completionTokens,
		TotalTokens:      promptTokens + completionTokens,
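A self-contained sketch (simplified local types) of the dummy-reply workaround visible above: when a system prompt would otherwise end the contents, a "model" turn saying "Okay" is appended so that roles alternate the way the Gemini API expects:

package main

import "fmt"

type Part struct{ Text string }

type ChatContent struct {
	Role  string
	Parts []Part
}

func main() {
	// Incoming conversation that ends with a system prompt (remapped to a user turn).
	contents := []ChatContent{
		{Role: "user", Parts: []Part{{Text: "You are terse."}}},
	}
	// The workaround from the diff: append a dummy "model" turn.
	contents = append(contents, ChatContent{Role: "model", Parts: []Part{{Text: "Okay"}}})
	for _, c := range contents {
		fmt.Println(c.Role+":", c.Parts[0].Text)
	}
	// user: You are terse.
	// model: Okay
}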
							
								
								
									
relay/channel/gemini/model.go (new file)
@@ -0,0 +1,41 @@
package gemini

type ChatRequest struct {
	Contents         []ChatContent        `json:"contents"`
	SafetySettings   []ChatSafetySettings `json:"safety_settings,omitempty"`
	GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools            []ChatTools          `json:"tools,omitempty"`
}

type InlineData struct {
	MimeType string `json:"mimeType"`
	Data     string `json:"data"`
}

type Part struct {
	Text       string      `json:"text,omitempty"`
	InlineData *InlineData `json:"inlineData,omitempty"`
}

type ChatContent struct {
	Role  string `json:"role,omitempty"`
	Parts []Part `json:"parts"`
}

type ChatSafetySettings struct {
	Category  string `json:"category"`
	Threshold string `json:"threshold"`
}

type ChatTools struct {
	FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}

type ChatGenerationConfig struct {
	Temperature     float64  `json:"temperature,omitempty"`
	TopP            float64  `json:"topP,omitempty"`
	TopK            float64  `json:"topK,omitempty"`
	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
	CandidateCount  int      `json:"candidateCount,omitempty"`
	StopSequences   []string `json:"stopSequences,omitempty"`
}
@@ -1,22 +0,0 @@
-package google
-
-import (
-	"github.com/gin-gonic/gin"
-	"net/http"
-	"one-api/relay/channel/openai"
-)
-
-type Adaptor struct {
-}
-
-func (a *Adaptor) Auth(c *gin.Context) error {
-	return nil
-}
-
-func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
-	return nil, nil
-}
-
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
-	return nil, nil, nil
-}
@@ -1,80 +0,0 @@
-package google
-
-import (
-	"one-api/relay/channel/openai"
-)
-
-type GeminiChatRequest struct {
-	Contents         []GeminiChatContent        `json:"contents"`
-	SafetySettings   []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
-	GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
-	Tools            []GeminiChatTools          `json:"tools,omitempty"`
-}
-
-type GeminiInlineData struct {
-	MimeType string `json:"mimeType"`
-	Data     string `json:"data"`
-}
-
-type GeminiPart struct {
-	Text       string            `json:"text,omitempty"`
-	InlineData *GeminiInlineData `json:"inlineData,omitempty"`
-}
-
-type GeminiChatContent struct {
-	Role  string       `json:"role,omitempty"`
-	Parts []GeminiPart `json:"parts"`
-}
-
-type GeminiChatSafetySettings struct {
-	Category  string `json:"category"`
-	Threshold string `json:"threshold"`
-}
-
-type GeminiChatTools struct {
-	FunctionDeclarations any `json:"functionDeclarations,omitempty"`
-}
-
-type GeminiChatGenerationConfig struct {
-	Temperature     float64  `json:"temperature,omitempty"`
-	TopP            float64  `json:"topP,omitempty"`
-	TopK            float64  `json:"topK,omitempty"`
-	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
-	CandidateCount  int      `json:"candidateCount,omitempty"`
-	StopSequences   []string `json:"stopSequences,omitempty"`
-}
-
-type PaLMChatMessage struct {
-	Author  string `json:"author"`
-	Content string `json:"content"`
-}
-
-type PaLMFilter struct {
-	Reason  string `json:"reason"`
-	Message string `json:"message"`
-}
-
-type PaLMPrompt struct {
-	Messages []PaLMChatMessage `json:"messages"`
-}
-
-type PaLMChatRequest struct {
-	Prompt         PaLMPrompt `json:"prompt"`
-	Temperature    float64    `json:"temperature,omitempty"`
-	CandidateCount int        `json:"candidateCount,omitempty"`
-	TopP           float64    `json:"topP,omitempty"`
-	TopK           int        `json:"topK,omitempty"`
-}
-
-type PaLMError struct {
-	Code    int    `json:"code"`
-	Message string `json:"message"`
-	Status  string `json:"status"`
-}
-
-type PaLMChatResponse struct {
-	Candidates []PaLMChatMessage `json:"candidates"`
-	Messages   []openai.Message  `json:"messages"`
-	Filters    []PaLMFilter      `json:"filters"`
-	Error      PaLMError         `json:"error"`
-}
relay/channel/groq/constants.go (new file)
@@ -0,0 +1,10 @@
package groq

// https://console.groq.com/docs/models

var ModelList = []string{
	"gemma-7b-it",
	"llama2-7b-2048",
	"llama2-70b-4096",
	"mixtral-8x7b-32768",
}
@@ -2,14 +2,19 @@ package channel

import (
	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
-	"one-api/relay/channel/openai"
)

type Adaptor interface {
-	GetRequestURL() string
-	Auth(c *gin.Context) error
-	ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error)
-	DoRequest(request *openai.GeneralOpenAIRequest) error
-	DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error)
+	Init(meta *util.RelayMeta)
+	GetRequestURL(meta *util.RelayMeta) (string, error)
+	SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
+	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
+	DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
+	DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode)
	GetModelList() []string
	GetChannelName() string
}
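To make the widened interface concrete, a skeletal, hypothetical channel implementation (the `dummy` package and its pass-through behavior are illustrative only; real channels convert to their own schema and parse their own responses):

package dummy

import (
	"errors"
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/channel"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

type Adaptor struct{}

func (a *Adaptor) Init(meta *util.RelayMeta) {}

func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
	return meta.BaseURL + "/v1/chat/completions", nil // hypothetical upstream path
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
	channel.SetupCommonRequestHeader(c, req, meta)
	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
	return nil
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	return request, nil // pass-through; a real channel maps to its own request type
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
	return channel.DoRequestHelper(a, c, meta, requestBody)
}

func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (*model.Usage, *model.ErrorWithStatusCode) {
	return nil, nil // a real channel parses resp and accounts usage here
}

func (a *Adaptor) GetModelList() []string { return []string{"dummy-model"} }

func (a *Adaptor) GetChannelName() string { return "dummy" }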
							
								
								
									
relay/channel/lingyiwanwu/constants.go (new file)
@@ -0,0 +1,9 @@
package lingyiwanwu

// https://platform.lingyiwanwu.com/docs

var ModelList = []string{
	"yi-34b-chat-0205",
	"yi-34b-chat-200k",
	"yi-vl-plus",
}
relay/channel/minimax/constants.go (new file)
@@ -0,0 +1,7 @@
package minimax

var ModelList = []string{
	"abab5.5s-chat",
	"abab5.5-chat",
	"abab6-chat",
}
relay/channel/minimax/main.go (new file)
@@ -0,0 +1,14 @@
package minimax

import (
	"fmt"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/util"
)

func GetRequestURL(meta *util.RelayMeta) (string, error) {
	if meta.Mode == constant.RelayModeChatCompletions {
		return fmt.Sprintf("%s/v1/text/chatcompletion_v2", meta.BaseURL), nil
	}
	return "", fmt.Errorf("unsupported relay mode %d for minimax", meta.Mode)
}
relay/channel/mistral/constants.go (new file)
@@ -0,0 +1,10 @@
package mistral

var ModelList = []string{
	"open-mistral-7b",
	"open-mixtral-8x7b",
	"mistral-small-latest",
	"mistral-medium-latest",
	"mistral-large-latest",
	"mistral-embed",
}
relay/channel/moonshot/constants.go (new file)
@@ -0,0 +1,7 @@
package moonshot

var ModelList = []string{
	"moonshot-v1-8k",
	"moonshot-v1-32k",
	"moonshot-v1-128k",
}
relay/channel/ollama/adaptor.go (new file)
@@ -0,0 +1,65 @@
package ollama

import (
	"errors"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/channel"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
	"io"
	"net/http"
)

type Adaptor struct {
}

func (a *Adaptor) Init(meta *util.RelayMeta) {

}

func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
	// https://github.com/ollama/ollama/blob/main/docs/api.md
	fullRequestURL := fmt.Sprintf("%s/api/chat", meta.BaseURL)
	return fullRequestURL, nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
	channel.SetupCommonRequestHeader(c, req, meta)
	req.Header.Set("Authorization", "Bearer "+meta.APIKey)
	return nil
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	switch relayMode {
	case constant.RelayModeEmbeddings:
		return nil, errors.New("not supported")
	default:
		return ConvertRequest(*request), nil
	}
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
	return channel.DoRequestHelper(a, c, meta, requestBody)
}

func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		err, usage = StreamHandler(c, resp)
	} else {
		err, usage = Handler(c, resp)
	}
	return
}

func (a *Adaptor) GetModelList() []string {
	return ModelList
}

func (a *Adaptor) GetChannelName() string {
	return "ollama"
}
relay/channel/ollama/constants.go (new file)
@@ -0,0 +1,5 @@
package ollama

var ModelList = []string{
	"qwen:0.5b-chat",
}
relay/channel/ollama/main.go (new file)
@@ -0,0 +1,178 @@
package ollama

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/model"
	"io"
	"net/http"
	"strings"
)

func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
	ollamaRequest := ChatRequest{
		Model: request.Model,
		Options: &Options{
			Seed:             int(request.Seed),
			Temperature:      request.Temperature,
			TopP:             request.TopP,
			FrequencyPenalty: request.FrequencyPenalty,
			PresencePenalty:  request.PresencePenalty,
		},
		Stream: request.Stream,
	}
	for _, message := range request.Messages {
		ollamaRequest.Messages = append(ollamaRequest.Messages, Message{
			Role:    message.Role,
			Content: message.StringContent(),
		})
	}
	return &ollamaRequest
}

func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse {
	choice := openai.TextResponseChoice{
		Index: 0,
		Message: model.Message{
			Role:    response.Message.Role,
			Content: response.Message.Content,
		},
	}
	if response.Done {
		choice.FinishReason = "stop"
	}
	fullTextResponse := openai.TextResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
		Object:  "chat.completion",
		Created: helper.GetTimestamp(),
		Choices: []openai.TextResponseChoice{choice},
		Usage: model.Usage{
			PromptTokens:     response.PromptEvalCount,
			CompletionTokens: response.EvalCount,
			TotalTokens:      response.PromptEvalCount + response.EvalCount,
		},
	}
	return &fullTextResponse
}

func streamResponseOllama2OpenAI(ollamaResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Role = ollamaResponse.Message.Role
	choice.Delta.Content = ollamaResponse.Message.Content
	if ollamaResponse.Done {
		choice.FinishReason = &constant.StopFinishReason
	}
	response := openai.ChatCompletionsStreamResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
		Object:  "chat.completion.chunk",
		Created: helper.GetTimestamp(),
		Model:   ollamaResponse.Model,
		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
	}
	return &response
}

func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	var usage model.Usage
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "}\n"); i >= 0 {
			return i + 2, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	dataChan := make(chan string)
	stopChan := make(chan bool)
	go func() {
		for scanner.Scan() {
			data := strings.TrimPrefix(scanner.Text(), "}")
			dataChan <- data + "}"
		}
		stopChan <- true
	}()
	common.SetEventStreamHeaders(c)
	c.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			var ollamaResponse ChatResponse
			err := json.Unmarshal([]byte(data), &ollamaResponse)
			if err != nil {
				logger.SysError("error unmarshalling stream response: " + err.Error())
				return true
			}
			if ollamaResponse.EvalCount != 0 {
				usage.PromptTokens = ollamaResponse.PromptEvalCount
				usage.CompletionTokens = ollamaResponse.EvalCount
				usage.TotalTokens = ollamaResponse.PromptEvalCount + ollamaResponse.EvalCount
			}
			response := streamResponseOllama2OpenAI(&ollamaResponse)
			jsonResponse, err := json.Marshal(response)
			if err != nil {
				logger.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
			return true
		case <-stopChan:
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}
	})
	err := resp.Body.Close()
	if err != nil {
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	return nil, &usage
}

func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	ctx := context.TODO()
	var ollamaResponse ChatResponse
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}
	logger.Debugf(ctx, "ollama response: %s", string(responseBody))
	err = resp.Body.Close()
	if err != nil {
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	err = json.Unmarshal(responseBody, &ollamaResponse)
	if err != nil {
		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if ollamaResponse.Error != "" {
		return &model.ErrorWithStatusCode{
			Error: model.Error{
				Message: ollamaResponse.Error,
				Type:    "ollama_error",
				Param:   "",
				Code:    "ollama_error",
			},
			StatusCode: resp.StatusCode,
		}, nil
	}
	fullTextResponse := responseOllama2OpenAI(&ollamaResponse)
	jsonResponse, err := json.Marshal(fullTextResponse)
	if err != nil {
		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, err = c.Writer.Write(jsonResponse)
	return nil, &fullTextResponse.Usage
}
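The custom bufio.Scanner split above cuts the stream at "}\n" because ollama emits newline-delimited JSON objects; a runnable demo (canned input, simplified) of that splitter:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Two newline-delimited JSON chunks, as ollama streams them.
	body := "{\"message\":{\"content\":\"Hel\"},\"done\":false}\n" +
		"{\"message\":{\"content\":\"lo\"},\"done\":true}\n"
	scanner := bufio.NewScanner(strings.NewReader(body))
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		// Cut at the first "}\n"; the token excludes the closing brace,
		// and advance skips both the brace and the newline.
		if i := strings.Index(string(data), "}\n"); i >= 0 {
			return i + 2, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	for scanner.Scan() {
		// The handler re-appends the "}" the splitter consumed, restoring valid JSON.
		fmt.Println(scanner.Text() + "}")
	}
	// {"message":{"content":"Hel"},"done":false}
	// {"message":{"content":"lo"},"done":true}
}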
Some files were not shown because too many files have changed in this diff.