Mirror of https://github.com/songquanpeng/one-api.git
Synced 2025-11-04 07:43:41 +08:00

Compare commits

41 Commits: v0.6.11-al ... 5f6b515bb3
| SHA1 |
|---|
| 5f6b515bb3 |
| 638a4fb77d |
| 4e2430e5d3 |
| b2d6aa783b |
| 761ee32d19 |
| c426b64b3d |
| eaef9629a4 |
| d236477531 |
| 34c7523f01 |
| c893672635 |
| bbfaf1fb95 |
| adcf4712e6 |
| 969fdca9ef |
| 6708eed8a0 |
| ad63c9e66f |
| 76e8199026 |
| 413fcde382 |
| 6e634b85cf |
| a0d7d5a965 |
| de10e102bd |
| c61d6440f9 |
| 3a8924d7af |
| 95527d76ef |
| 7ec33793b7 |
| 1a6812182b |
| 5ba60433d7 |
| 480f248a3d |
| 7ac553541b |
| a5c517c27a |
| 3f421c4f04 |
| 1ce6a226f6 |
| cafd0a0327 |
| 8b8cd03e85 |
| 54c38de813 |
| d6284bf6b0 |
| df5d2ca93d |
| fef7ae048b |
| 6916debf66 |
| 53da209134 |
| 517f6ad211 |
| 10aba11f18 |
.gitignore (vendored, 3 changes)

```diff
@@ -12,4 +12,5 @@ cmd.md
 .env
 /one-api
 temp
-.DS_Store
+.DS_Store
+/__debug_bin*
```
Dockerfile

```diff
@@ -44,4 +44,4 @@ COPY --from=builder2 /build/one-api /
 EXPOSE 3000
 WORKDIR /data
-ENTRYPOINT ["/one-api"]
+ENTRYPOINT ["/one-api"]
```
README.md

```diff
@@ -115,7 +115,7 @@ _✨ Access all major models through the standard OpenAI API format, ready to use out of the box ✨_
 19. Supports rich **customization** options:
     1. Customizable system name, logo, and footer.
     2. Customizable home and about pages: use HTML & Markdown, or embed a standalone page via iframe.
-20. Supports calling the management API with a system access token, to **extend and customize** One API without forking it; see the [API docs](./docs/API.md)..
+20. Supports calling the management API with a system access token, to **extend and customize** One API without forking it; see the [API docs](./docs/API.md).
 21. Supports Cloudflare Turnstile user verification.
 22. Supports user management and **multiple login/registration methods**:
     + Email login/registration (with an optional registration email whitelist) and password reset via email.
```

```diff
@@ -385,7 +385,7 @@ graph LR
    + Example: `NODE_TYPE=slave`
 9. `CHANNEL_UPDATE_FREQUENCY`: when set, channel balances are refreshed periodically, in minutes; unset disables the refresh.
    + Example: `CHANNEL_UPDATE_FREQUENCY=1440`
-10. `CHANNEL_TEST_FREQUENCY`: when set, channels are tested periodically, in minutes; unset disables the tests. 
+10. `CHANNEL_TEST_FREQUENCY`: when set, channels are tested periodically, in minutes; unset disables the tests.
    + Example: `CHANNEL_TEST_FREQUENCY=1440`
 11. `POLLING_INTERVAL`: request interval when batch-updating channel balances or testing availability, in seconds; defaults to no interval.
     + Example: `POLLING_INTERVAL=5`
```
```diff
@@ -163,4 +163,7 @@ var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "")
 var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)

 var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false)
-var TestPrompt = env.String("TEST_PROMPT", "Print your model name exactly and do not output without any other text.")
+var TestPrompt = env.String("TEST_PROMPT", "Output only your specific model name with no additional text.")
+
+// OpenrouterProviderSort is used to determine the order of the providers in the openrouter
+var OpenrouterProviderSort = env.String("OPENROUTER_PROVIDER_SORT", "")
```
```diff
@@ -1,6 +1,9 @@
 package conv

 func AsString(v any) string {
-	str, _ := v.(string)
-	return str
+	if str, ok := v.(string); ok {
+		return str
+	}
+
+	return ""
 }
```
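Both the old and new bodies of AsString behave identically — a failed two-value type assertion already yields the zero value `""` — so the rewrite only makes the fallback explicit. A minimal check (the `common/conv` import path is an assumption inferred from the `package conv` line):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/conv"
)

func main() {
	fmt.Println(conv.AsString("gpt-4o")) // "gpt-4o"
	fmt.Println(conv.AsString(42))       // "" — assertion fails, zero value returned
	fmt.Println(conv.AsString(nil))      // ""
}
```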
common/helper/audio.go (new file, 62 lines)

```go
package helper

import (
	"bytes"
	"context"
	"io"
	"os"
	"os/exec"
	"strconv"

	"github.com/pkg/errors"
)

// SaveTmpFile saves data to a temporary file. The filename would be appended with a random string.
func SaveTmpFile(filename string, data io.Reader) (string, error) {
	if data == nil {
		return "", errors.New("data is nil")
	}

	f, err := os.CreateTemp("", "*-"+filename)
	if err != nil {
		return "", errors.Wrapf(err, "failed to create temporary file %s", filename)
	}
	defer f.Close()

	_, err = io.Copy(f, data)
	if err != nil {
		return "", errors.Wrapf(err, "failed to copy data to temporary file %s", filename)
	}

	return f.Name(), nil
}

// GetAudioTokens returns the number of tokens in an audio file.
func GetAudioTokens(ctx context.Context, audio io.Reader, tokensPerSecond float64) (float64, error) {
	filename, err := SaveTmpFile("audio", audio)
	if err != nil {
		return 0, errors.Wrap(err, "failed to save audio to temporary file")
	}
	defer os.Remove(filename)

	duration, err := GetAudioDuration(ctx, filename)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get audio tokens")
	}

	return duration * tokensPerSecond, nil
}

// GetAudioDuration returns the duration of an audio file in seconds.
func GetAudioDuration(ctx context.Context, filename string) (float64, error) {
	// ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {{input}}
	c := exec.CommandContext(ctx, "/usr/bin/ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename)
	output, err := c.Output()
	if err != nil {
		return 0, errors.Wrap(err, "failed to get audio duration")
	}

	// Actually gpt-4-audio calculates tokens with 0.1s precision,
	// while whisper calculates tokens with 1s precision
	return strconv.ParseFloat(string(bytes.TrimSpace(output)), 64)
}
```
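Taken together, the new helpers estimate audio token usage as duration × rate: the reader is spooled to a temporary file, ffprobe reports its duration in seconds, and the caller supplies a tokens-per-second rate (the tests below use 50). A usage sketch; the local file name is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/songquanpeng/one-api/common/helper"
)

func main() {
	f, err := os.Open("sample.m4a") // any local audio file ffprobe can read
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// 50 tokens/second matches the rate used in the accompanying tests.
	tokens, err := helper.GetAudioTokens(context.Background(), f, 50)
	if err != nil {
		panic(err)
	}
	fmt.Printf("estimated audio tokens: %.1f\n", tokens)
}
```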
common/helper/audio_test.go (new file, 68 lines)

```go
package helper

import (
	"context"
	"io"
	"net/http"
	"os"
	"os/exec"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetAudioDuration(t *testing.T) {
	// skip if there is no ffmpeg installed
	_, err := exec.LookPath("ffmpeg")
	if err != nil {
		t.Skip("ffmpeg not installed, skipping test")
	}

	t.Run("should return correct duration for a valid audio file", func(t *testing.T) {
		tmpFile, err := os.CreateTemp("", "test_audio*.mp3")
		require.NoError(t, err)
		defer os.Remove(tmpFile.Name())

		// download test audio file
		resp, err := http.Get("https://s3.laisky.com/uploads/2025/01/audio-sample.m4a")
		require.NoError(t, err)
		defer resp.Body.Close()

		_, err = io.Copy(tmpFile, resp.Body)
		require.NoError(t, err)
		require.NoError(t, tmpFile.Close())

		duration, err := GetAudioDuration(context.Background(), tmpFile.Name())
		require.NoError(t, err)
		require.Equal(t, duration, 3.904)
	})

	t.Run("should return an error for a non-existent file", func(t *testing.T) {
		_, err := GetAudioDuration(context.Background(), "non_existent_file.mp3")
		require.Error(t, err)
	})
}

func TestGetAudioTokens(t *testing.T) {
	// skip if there is no ffmpeg installed
	_, err := exec.LookPath("ffmpeg")
	if err != nil {
		t.Skip("ffmpeg not installed, skipping test")
	}

	t.Run("should return correct tokens for a valid audio file", func(t *testing.T) {
		// download test audio file
		resp, err := http.Get("https://s3.laisky.com/uploads/2025/01/audio-sample.m4a")
		require.NoError(t, err)
		defer resp.Body.Close()

		tokens, err := GetAudioTokens(context.Background(), resp.Body, 50)
		require.NoError(t, err)
		require.Equal(t, tokens, 195.2)
	})

	t.Run("should return an error for a non-existent file", func(t *testing.T) {
		_, err := GetAudioTokens(context.Background(), nil, 1)
		require.Error(t, err)
	})
}
```
```diff
@@ -6,13 +6,13 @@ import (
 	"html/template"
 	"log"
 	"net"
 	"net/http"
 	"os/exec"
 	"runtime"
 	"strconv"
 	"strings"

 	"github.com/gin-gonic/gin"

 	"github.com/songquanpeng/one-api/common/random"
 )
```

```diff
@@ -32,6 +32,14 @@ func OpenBrowser(url string) {
 	}
 }

+// RespondError sends a JSON response with a success status and an error message.
+func RespondError(c *gin.Context, err error) {
+	c.JSON(http.StatusOK, gin.H{
+		"success": false,
+		"message": err.Error(),
+	})
+}
+
 func GetIp() (ip string) {
 	ips, err := net.InterfaceAddrs()
 	if err != nil {
```
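RespondError standardizes failure responses as HTTP 200 with a `success` flag in the body rather than an HTTP error status. A sketch of a handler using it (the route and error are illustrative, and the `common/helper` package path is an assumption based on the surrounding helper hunks):

```go
package main

import (
	"errors"

	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/common/helper"
)

func main() {
	r := gin.Default()
	r.GET("/demo", func(c *gin.Context) {
		// The client receives HTTP 200 with:
		// {"success": false, "message": "something went wrong"}
		helper.RespondError(c, errors.New("something went wrong"))
	})
	_ = r.Run(":3000")
}
```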
```diff
@@ -5,6 +5,7 @@ import (
 	"time"
 )

+// GetTimestamp get current timestamp in seconds
 func GetTimestamp() int64 {
 	return time.Now().Unix()
 }
```
```diff
@@ -106,6 +106,8 @@ func testChannel(ctx context.Context, channel *model.Channel, request *relaymode
 	if err != nil {
 		return "", err, nil
 	}
+	c.Set(ctxkey.ConvertedRequest, convertedRequest)
+
 	jsonData, err := json.Marshal(convertedRequest)
 	if err != nil {
 		return "", err, nil
```
```diff
@@ -7,6 +7,7 @@ import (

 	"gorm.io/gorm"

+	"github.com/pkg/errors"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/utils"
 )
```

```diff
@@ -42,7 +43,7 @@ func GetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority b
 		err = channelQuery.Order("RAND()").First(&ability).Error
 	}
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "get random satisfied channel")
 	}
 	channel := Channel{}
 	channel.Id = ability.ChannelId
```
```diff
@@ -38,7 +38,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	return aiProxyLibraryRequest, nil
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -67,7 +67,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	}
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -14,10 +14,14 @@ var ModelList = []string{
 	"qwen2-72b-instruct", "qwen2-57b-a14b-instruct", "qwen2-7b-instruct", "qwen2-1.5b-instruct", "qwen2-0.5b-instruct",
 	"qwen1.5-110b-chat", "qwen1.5-72b-chat", "qwen1.5-32b-chat", "qwen1.5-14b-chat", "qwen1.5-7b-chat", "qwen1.5-1.8b-chat", "qwen1.5-0.5b-chat",
 	"qwen-72b-chat", "qwen-14b-chat", "qwen-7b-chat", "qwen-1.8b-chat", "qwen-1.8b-longcontext-chat",
+	"qvq-72b-preview",
+	"qwen2.5-vl-72b-instruct", "qwen2.5-vl-7b-instruct", "qwen2.5-vl-2b-instruct", "qwen2.5-vl-1b-instruct", "qwen2.5-vl-0.5b-instruct",
 	"qwen2-vl-7b-instruct", "qwen2-vl-2b-instruct", "qwen-vl-v1", "qwen-vl-chat-v1",
 	"qwen2-audio-instruct", "qwen-audio-chat",
 	"qwen2.5-math-72b-instruct", "qwen2.5-math-7b-instruct", "qwen2.5-math-1.5b-instruct", "qwen2-math-72b-instruct", "qwen2-math-7b-instruct", "qwen2-math-1.5b-instruct",
 	"qwen2.5-coder-32b-instruct", "qwen2.5-coder-14b-instruct", "qwen2.5-coder-7b-instruct", "qwen2.5-coder-3b-instruct", "qwen2.5-coder-1.5b-instruct", "qwen2.5-coder-0.5b-instruct",
 	"text-embedding-v1", "text-embedding-v3", "text-embedding-v2", "text-embedding-async-v2", "text-embedding-async-v1",
 	"ali-stable-diffusion-xl", "ali-stable-diffusion-v1.5", "wanx-v1",
+	"qwen-mt-plus", "qwen-mt-turbo",
+	"deepseek-r1", "deepseek-v3", "deepseek-r1-distill-qwen-1.5b", "deepseek-r1-distill-qwen-7b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b", "deepseek-r1-distill-llama-8b", "deepseek-r1-distill-llama-70b",
 }
```
relay/adaptor/alibailian/constants.go (new file, 20 lines)

```go
package alibailian

// https://help.aliyun.com/zh/model-studio/getting-started/models

var ModelList = []string{
	"qwen-turbo",
	"qwen-plus",
	"qwen-long",
	"qwen-max",
	"qwen-coder-plus",
	"qwen-coder-plus-latest",
	"qwen-coder-turbo",
	"qwen-coder-turbo-latest",
	"qwen-mt-plus",
	"qwen-mt-turbo",
	"qwq-32b-preview",

	"deepseek-r1",
	"deepseek-v3",
}
```
relay/adaptor/alibailian/main.go (new file, 19 lines)

```go
package alibailian

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
	switch meta.Mode {
	case relaymode.ChatCompletions:
		return fmt.Sprintf("%s/compatible-mode/v1/chat/completions", meta.BaseURL), nil
	case relaymode.Embeddings:
		return fmt.Sprintf("%s/compatible-mode/v1/embeddings", meta.BaseURL), nil
	default:
	}
	return "", fmt.Errorf("unsupported relay mode %d for ali bailian", meta.Mode)
}
```
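GetRequestURL maps the relay mode onto Bailian's OpenAI-compatible endpoints; anything other than chat completions or embeddings is rejected. A usage sketch (the base URL is illustrative):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

func main() {
	m := &meta.Meta{
		Mode:    relaymode.ChatCompletions,
		BaseURL: "https://dashscope.aliyuncs.com", // illustrative base URL
	}
	url, err := alibailian.GetRequestURL(m)
	fmt.Println(url, err)
	// https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions <nil>
}
```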
```diff
@@ -36,8 +36,8 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me

 	// https://x.com/alexalbert__/status/1812921642143900036
 	// claude-3-5-sonnet can support 8k context
-	if strings.HasPrefix(meta.ActualModelName, "claude-3-5-sonnet") {
-		req.Header.Set("anthropic-beta", "max-tokens-3-5-sonnet-2024-07-15")
+	if strings.HasPrefix(meta.ActualModelName, "claude-3-7-sonnet") {
+		req.Header.Set("anthropic-beta", "output-128k-2025-02-19")
 	}

 	return nil
```

```diff
@@ -47,10 +47,10 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
-	return ConvertRequest(*request), nil
+	return ConvertRequest(c, *request)
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -3,11 +3,13 @@ package anthropic
 var ModelList = []string{
 	"claude-instant-1.2", "claude-2.0", "claude-2.1",
 	"claude-3-haiku-20240307",
+	"claude-3-5-haiku-20241022",
 	"claude-3-5-haiku-latest",
-	"claude-3-5-haiku-20241022",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
+	"claude-3-5-sonnet-latest",
 	"claude-3-5-sonnet-20240620",
 	"claude-3-5-sonnet-20241022",
-	"claude-3-5-sonnet-latest",
+	"claude-3-7-sonnet-latest",
+	"claude-3-7-sonnet-20250219",
 }
```
```diff
@@ -2,18 +2,21 @@ package anthropic

 import (
 	"bufio"
+	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/songquanpeng/one-api/common/render"
 	"io"
+	"math"
 	"net/http"
 	"strings"

 	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/render"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/model"
 )
```

```diff
@@ -36,7 +39,16 @@ func stopReasonClaude2OpenAI(reason *string) string {
 	}
 }

-func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
+// isModelSupportThinking is used to check if the model supports extended thinking
+func isModelSupportThinking(model string) bool {
+	if strings.Contains(model, "claude-3-7-sonnet") {
+		return true
+	}
+
+	return false
+}
+
+func ConvertRequest(c *gin.Context, textRequest model.GeneralOpenAIRequest) (*Request, error) {
 	claudeTools := make([]Tool, 0, len(textRequest.Tools))

 	for _, tool := range textRequest.Tools {
```
```diff
@@ -61,7 +73,27 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 		TopK:        textRequest.TopK,
 		Stream:      textRequest.Stream,
 		Tools:       claudeTools,
+		Thinking:    textRequest.Thinking,
 	}

+	if isModelSupportThinking(textRequest.Model) &&
+		c.Request.URL.Query().Has("thinking") && claudeRequest.Thinking == nil {
+		claudeRequest.Thinking = &model.Thinking{
+			Type:         "enabled",
+			BudgetTokens: int(math.Min(1024, float64(claudeRequest.MaxTokens/2))),
+		}
+	}
+
+	if isModelSupportThinking(textRequest.Model) &&
+		claudeRequest.Thinking != nil {
+		if claudeRequest.MaxTokens <= 1024 {
+			return nil, errors.New("max_tokens must be greater than 1024 when using extended thinking")
+		}
+
+		// top_p must be nil when using extended thinking
+		claudeRequest.TopP = nil
+	}
+
 	if len(claudeTools) > 0 {
 		claudeToolChoice := struct {
 			Type string `json:"type"`
```
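Two rules fall out of this hunk: a `?thinking` query parameter opts a claude-3-7-sonnet request into extended thinking with a default budget of min(1024, max_tokens/2), and any request that ends up with thinking enabled must carry max_tokens above 1024 and loses its top_p. A small sketch of the default-budget arithmetic:

```go
package main

import (
	"fmt"
	"math"
)

// defaultBudget mirrors the computation in the hunk above for a request
// that arrives with ?thinking and no explicit thinking block.
func defaultBudget(maxTokens int) int {
	return int(math.Min(1024, float64(maxTokens/2)))
}

func main() {
	fmt.Println(defaultBudget(4096)) // 1024 — capped at 1024
	fmt.Println(defaultBudget(2000)) // 1000
	// Requests with thinking enabled and max_tokens <= 1024 are rejected outright.
}
```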
```diff
@@ -127,7 +159,9 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 			var content Content
 			if part.Type == model.ContentTypeText {
 				content.Type = "text"
-				content.Text = part.Text
+				if part.Text != nil {
+					content.Text = *part.Text
+				}
 			} else if part.Type == model.ContentTypeImageURL {
 				content.Type = "image"
 				content.Source = &ImageSource{
```

```diff
@@ -142,13 +176,14 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 		claudeMessage.Content = contents
 		claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
 	}
-	return &claudeRequest
+	return &claudeRequest, nil
 }

 // https://docs.anthropic.com/claude/reference/messages-streaming
 func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *Response) {
 	var response *Response
 	var responseText string
+	var reasoningText string
 	var stopReason string
 	tools := make([]model.Tool, 0)
```
```diff
@@ -158,6 +193,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
 	case "content_block_start":
 		if claudeResponse.ContentBlock != nil {
 			responseText = claudeResponse.ContentBlock.Text
+			if claudeResponse.ContentBlock.Thinking != nil {
+				reasoningText = *claudeResponse.ContentBlock.Thinking
+			}
+
 			if claudeResponse.ContentBlock.Type == "tool_use" {
 				tools = append(tools, model.Tool{
 					Id:   claudeResponse.ContentBlock.Id,
```

```diff
@@ -172,6 +211,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
 	case "content_block_delta":
 		if claudeResponse.Delta != nil {
 			responseText = claudeResponse.Delta.Text
+			if claudeResponse.Delta.Thinking != nil {
+				reasoningText = *claudeResponse.Delta.Thinking
+			}
+
 			if claudeResponse.Delta.Type == "input_json_delta" {
 				tools = append(tools, model.Tool{
 					Function: model.Function{
```

```diff
@@ -189,9 +232,20 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
 		if claudeResponse.Delta != nil && claudeResponse.Delta.StopReason != nil {
 			stopReason = *claudeResponse.Delta.StopReason
 		}
+	case "thinking_delta":
+		if claudeResponse.Delta != nil && claudeResponse.Delta.Thinking != nil {
+			reasoningText = *claudeResponse.Delta.Thinking
+		}
+	case "ping",
+		"message_stop",
+		"content_block_stop":
+	default:
+		logger.SysErrorf("unknown stream response type %q", claudeResponse.Type)
 	}

 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = responseText
+	choice.Delta.Reasoning = &reasoningText
 	if len(tools) > 0 {
 		choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
 		choice.Delta.ToolCalls = tools
```
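For context, Anthropic's extended-thinking streaming docs describe thinking text arriving in `content_block_delta` events whose delta carries a `thinking` field — which is what the new `Delta.Thinking` handling above picks up. An illustrative event (not taken from this diff):

```text
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"thinking_delta","thinking":"Let me check the model name..."}}
```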
```diff
@@ -209,11 +263,23 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo

 func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 	var responseText string
-	if len(claudeResponse.Content) > 0 {
-		responseText = claudeResponse.Content[0].Text
-	}
+	var reasoningText string
+
 	tools := make([]model.Tool, 0)
 	for _, v := range claudeResponse.Content {
+		switch v.Type {
+		case "thinking":
+			if v.Thinking != nil {
+				reasoningText += *v.Thinking
+			} else {
+				logger.Errorf(context.Background(), "thinking is nil in response")
+			}
+		case "text":
+			responseText += v.Text
+		default:
+			logger.Warnf(context.Background(), "unknown response type %q", v.Type)
+		}
+
 		if v.Type == "tool_use" {
 			args, _ := json.Marshal(v.Input)
 			tools = append(tools, model.Tool{
```

```diff
@@ -226,11 +292,13 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 			})
 		}
 	}
+
 	choice := openai.TextResponseChoice{
 		Index: 0,
 		Message: model.Message{
 			Role:      "assistant",
 			Content:   responseText,
+			Reasoning: &reasoningText,
 			Name:      nil,
 			ToolCalls: tools,
 		},
```

```diff
@@ -277,6 +345,8 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		data = strings.TrimPrefix(data, "data:")
 		data = strings.TrimSpace(data)

+		logger.Debugf(c.Request.Context(), "stream <- %q\n", data)
+
 		var claudeResponse StreamResponse
 		err := json.Unmarshal([]byte(data), &claudeResponse)
 		if err != nil {
```

```diff
@@ -344,6 +414,9 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
+
+	logger.Debugf(c.Request.Context(), "response <- %s\n", string(responseBody))
+
 	var claudeResponse Response
 	err = json.Unmarshal(responseBody, &claudeResponse)
 	if err != nil {
```
```diff
@@ -1,5 +1,7 @@
 package anthropic

+import "github.com/songquanpeng/one-api/relay/model"
+
 // https://docs.anthropic.com/claude/reference/messages_post

 type Metadata struct {
```

```diff
@@ -22,6 +24,9 @@ type Content struct {
 	Input     any    `json:"input,omitempty"`
 	Content   string `json:"content,omitempty"`
 	ToolUseId string `json:"tool_use_id,omitempty"`
+	// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking
+	Thinking  *string `json:"thinking,omitempty"`
+	Signature *string `json:"signature,omitempty"`
 }

 type Message struct {
```

```diff
@@ -54,6 +59,7 @@ type Request struct {
 	Tools         []Tool    `json:"tools,omitempty"`
 	ToolChoice    any       `json:"tool_choice,omitempty"`
 	//Metadata    `json:"metadata,omitempty"`
+	Thinking *model.Thinking `json:"thinking,omitempty"`
 }

 type Usage struct {
```

```diff
@@ -84,6 +90,8 @@ type Delta struct {
 	PartialJson  string  `json:"partial_json,omitempty"`
 	StopReason   *string `json:"stop_reason"`
 	StopSequence *string `json:"stop_sequence"`
+	Thinking     *string `json:"thinking,omitempty"`
+	Signature    *string `json:"signature,omitempty"`
 }

 type StreamResponse struct {
```
```diff
@@ -72,7 +72,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
 	return nil
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -21,7 +21,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		return nil, errors.New("request is nil")
 	}

-	claudeReq := anthropic.ConvertRequest(*request)
+	claudeReq, err := anthropic.ConvertRequest(c, *request)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert request")
+	}
+
 	c.Set(ctxkey.RequestModel, request.Model)
 	c.Set(ctxkey.ConvertedRequest, claudeReq)
 	return claudeReq, nil
```
```diff
@@ -36,6 +36,8 @@ var AwsModelIDMap = map[string]string{
 	"claude-3-5-sonnet-20241022": "anthropic.claude-3-5-sonnet-20241022-v2:0",
 	"claude-3-5-sonnet-latest":   "anthropic.claude-3-5-sonnet-20241022-v2:0",
 	"claude-3-5-haiku-20241022":  "anthropic.claude-3-5-haiku-20241022-v1:0",
+	"claude-3-7-sonnet-latest":   "anthropic.claude-3-7-sonnet-20250219-v1:0",
+	"claude-3-7-sonnet-20250219": "anthropic.claude-3-7-sonnet-20250219-v1:0",
 }

 func awsModelID(requestModel string) (string, error) {
```
```diff
@@ -47,13 +49,14 @@ func awsModelID(requestModel string) (string, error) {
 }

 func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
 		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}

+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
 	awsReq := &bedrockruntime.InvokeModelInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
 		Accept:      aws.String("application/json"),
 		ContentType: aws.String("application/json"),
 	}
```

```diff
@@ -101,13 +104,14 @@ func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*

 func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
 	createdTime := helper.GetTimestamp()
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
 		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}

+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
 	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
 		Accept:      aws.String("application/json"),
 		ContentType: aws.String("application/json"),
 	}
```
```diff
@@ -1,6 +1,9 @@
 package aws

-import "github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+import (
+	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/model"
+)

 // Request is the request to AWS Claude
 //
```

```diff
@@ -17,4 +20,5 @@ type Request struct {
 	StopSequences    []string            `json:"stop_sequences,omitempty"`
 	Tools            []anthropic.Tool    `json:"tools,omitempty"`
 	ToolChoice       any                 `json:"tool_choice,omitempty"`
+	Thinking         *model.Thinking     `json:"thinking,omitempty"`
 }
```
```diff
@@ -70,13 +70,14 @@ func ConvertRequest(textRequest relaymodel.GeneralOpenAIRequest) *Request {
 }

 func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
 		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}

+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
 	awsReq := &bedrockruntime.InvokeModelInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
 		Accept:      aws.String("application/json"),
 		ContentType: aws.String("application/json"),
 	}
```

```diff
@@ -140,13 +141,14 @@ func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse {

 func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
 	createdTime := helper.GetTimestamp()
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
 		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}

+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
 	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
 		Accept:      aws.String("application/json"),
 		ContentType: aws.String("application/json"),
 	}
```
```diff
@@ -39,7 +39,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
 	return nil
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
relay/adaptor/aws/utils/consts.go (new file, 75 lines)

```go
package utils

import (
	"context"
	"slices"
	"strings"

	"github.com/songquanpeng/one-api/common/logger"
)

// CrossRegionInferences is a list of model IDs that support cross-region inference.
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html
//
// document.querySelectorAll('pre.programlisting code').forEach((e) => {console.log(e.innerHTML)})
var CrossRegionInferences = []string{
	"us.amazon.nova-lite-v1:0",
	"us.amazon.nova-micro-v1:0",
	"us.amazon.nova-pro-v1:0",
	"us.anthropic.claude-3-5-haiku-20241022-v1:0",
	"us.anthropic.claude-3-5-sonnet-20240620-v1:0",
	"us.anthropic.claude-3-5-sonnet-20241022-v2:0",
	"us.anthropic.claude-3-7-sonnet-20250219-v1:0",
	"us.anthropic.claude-3-haiku-20240307-v1:0",
	"us.anthropic.claude-3-opus-20240229-v1:0",
	"us.anthropic.claude-3-sonnet-20240229-v1:0",
	"us.meta.llama3-1-405b-instruct-v1:0",
	"us.meta.llama3-1-70b-instruct-v1:0",
	"us.meta.llama3-1-8b-instruct-v1:0",
	"us.meta.llama3-2-11b-instruct-v1:0",
	"us.meta.llama3-2-1b-instruct-v1:0",
	"us.meta.llama3-2-3b-instruct-v1:0",
	"us.meta.llama3-2-90b-instruct-v1:0",
	"us.meta.llama3-3-70b-instruct-v1:0",
	"us-gov.anthropic.claude-3-5-sonnet-20240620-v1:0",
	"us-gov.anthropic.claude-3-haiku-20240307-v1:0",
	"eu.amazon.nova-lite-v1:0",
	"eu.amazon.nova-micro-v1:0",
	"eu.amazon.nova-pro-v1:0",
	"eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
	"eu.anthropic.claude-3-haiku-20240307-v1:0",
	"eu.anthropic.claude-3-sonnet-20240229-v1:0",
	"eu.meta.llama3-2-1b-instruct-v1:0",
	"eu.meta.llama3-2-3b-instruct-v1:0",
	"apac.amazon.nova-lite-v1:0",
	"apac.amazon.nova-micro-v1:0",
	"apac.amazon.nova-pro-v1:0",
	"apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
	"apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
	"apac.anthropic.claude-3-haiku-20240307-v1:0",
	"apac.anthropic.claude-3-sonnet-20240229-v1:0",
}

// ConvertModelID2CrossRegionProfile converts the model ID to a cross-region profile ID.
func ConvertModelID2CrossRegionProfile(model, region string) string {
	var regionPrefix string
	switch prefix := strings.Split(region, "-")[0]; prefix {
	case "us", "eu":
		regionPrefix = prefix
	case "ap":
		regionPrefix = "apac"
	default:
		// not supported, return original model
		return model
	}

	newModelID := regionPrefix + "." + model
	if slices.Contains(CrossRegionInferences, newModelID) {
		logger.Debugf(context.TODO(), "convert model %s to cross-region profile %s", model, newModelID)
		return newModelID
	}

	// not found, return original model
	return model
}
```
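ConvertModelID2CrossRegionProfile derives the inference-profile prefix from the Bedrock client's region and only swaps the ID when the result is in the allow-list above, so unsupported model/region pairs fall through unchanged. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
)

func main() {
	// A us-east-1 client: the base model ID gains the "us." profile prefix.
	fmt.Println(utils.ConvertModelID2CrossRegionProfile(
		"anthropic.claude-3-7-sonnet-20250219-v1:0", "us-east-1"))
	// us.anthropic.claude-3-7-sonnet-20250219-v1:0

	// ap-northeast-1 maps to the "apac." prefix, but this model has no APAC
	// profile in the list, so the original ID is returned unchanged.
	fmt.Println(utils.ConvertModelID2CrossRegionProfile(
		"anthropic.claude-3-7-sonnet-20250219-v1:0", "ap-northeast-1"))
	// anthropic.claude-3-7-sonnet-20250219-v1:0
}
```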
```diff
@@ -109,7 +109,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	}
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -19,7 +19,7 @@ type Adaptor struct {
 }

 // ConvertImageRequest implements adaptor.Adaptor.
-func (*Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (*Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	return nil, errors.New("not implemented")
 }

```
```diff
@@ -19,9 +19,8 @@ import (
 )

 func ConvertCompletionsRequest(textRequest model.GeneralOpenAIRequest) *Request {
-	p, _ := textRequest.Prompt.(string)
 	return &Request{
-		Prompt:      p,
+		Prompt:      textRequest.Prompt,
 		MaxTokens:   textRequest.MaxTokens,
 		Stream:      textRequest.Stream,
 		Temperature: textRequest.Temperature,
```
```diff
@@ -15,7 +15,7 @@ import (
 type Adaptor struct{}

 // ConvertImageRequest implements adaptor.Adaptor.
-func (*Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (*Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	return nil, errors.New("not implemented")
 }

```
```diff
@@ -38,7 +38,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	return ConvertRequest(*request), nil
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -39,7 +39,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	return convertedRequest, nil
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -5,9 +5,10 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strings"

 	"github.com/gin-gonic/gin"

 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
```

```diff
@@ -20,17 +21,12 @@ type Adaptor struct {
 }

 func (a *Adaptor) Init(meta *meta.Meta) {

 }

 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	var defaultVersion string
-	switch meta.ActualModelName {
-	case "gemini-2.0-flash-exp",
-		"gemini-2.0-flash-thinking-exp",
-		"gemini-2.0-flash-thinking-exp-01-21":
-		defaultVersion = "v1beta"
-	default:
+	defaultVersion := config.GeminiVersion
+	if strings.Contains(meta.ActualModelName, "gemini-2") ||
+		strings.Contains(meta.ActualModelName, "gemini-1.5") {
+		defaultVersion = "v1beta"
 	}
```

```diff
@@ -70,7 +66,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	}
 }

-func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
+func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
```
```diff
@@ -1,11 +1,38 @@
 package gemini

+import (
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
+)
+
 // https://ai.google.dev/models/gemini

-var ModelList = []string{
-	"gemini-pro", "gemini-1.0-pro",
-	"gemini-1.5-flash", "gemini-1.5-pro",
-	"text-embedding-004", "aqa",
-	"gemini-2.0-flash-exp",
-	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
-}
+var ModelList = geminiv2.ModelList
+
+// ModelsSupportSystemInstruction is the list of models that support system instruction.
+//
+// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions
+var ModelsSupportSystemInstruction = []string{
+	// "gemini-1.0-pro-002",
+	// "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
+	// "gemini-1.5-flash-8b",
+	// "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
+	// "gemini-1.5-pro-experimental",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-2.0-flash-lite",
+	// "gemini-2.0-flash-exp-image-generation",
+	"gemini-2.0-pro-exp-02-05",
+}
+
+// IsModelSupportSystemInstruction check if the model support system instruction.
+//
+// Because the main version of Go is 1.20, slice.Contains cannot be used
+func IsModelSupportSystemInstruction(model string) bool {
+	for _, m := range ModelsSupportSystemInstruction {
+		if m == model {
+			return true
+		}
+	}
+
+	return false
+}
```
```diff
@@ -8,19 +8,18 @@ import (
 	"net/http"
 	"strings"

-	"github.com/songquanpeng/one-api/common/render"
-
+	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/common/random"
+	"github.com/songquanpeng/one-api/common/render"
 	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/model"
-
-	"github.com/gin-gonic/gin"
 )
```

```diff
@@ -61,9 +60,10 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 			},
 		},
 		GenerationConfig: ChatGenerationConfig{
-			Temperature:     textRequest.Temperature,
-			TopP:            textRequest.TopP,
-			MaxOutputTokens: textRequest.MaxTokens,
+			Temperature:        textRequest.Temperature,
+			TopP:               textRequest.TopP,
+			MaxOutputTokens:    textRequest.MaxTokens,
+			ResponseModalities: geminiv2.GetModelModalities(textRequest.Model),
 		},
 	}
 	if textRequest.ResponseFormat != nil {
```

```diff
@@ -106,9 +106,9 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 		var parts []Part
 		imageNum := 0
 		for _, part := range openaiContent {
-			if part.Type == model.ContentTypeText {
+			if part.Type == model.ContentTypeText && part.Text != nil && *part.Text != "" {
 				parts = append(parts, Part{
-					Text: part.Text,
+					Text: *part.Text,
 				})
 			} else if part.Type == model.ContentTypeImageURL {
 				imageNum += 1
```

```diff
@@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 		}
 		// Converting system prompt to prompt from user for the same reason
 		if content.Role == "system" {
-			content.Role = "user"
-			shouldAddDummyModelMessage = true
+			if IsModelSupportSystemInstruction(textRequest.Model) {
+				geminiRequest.SystemInstruction = &content
+				geminiRequest.SystemInstruction.Role = ""
+				continue
+			} else {
+				content.Role = "user"
+			}
 		}

 		geminiRequest.Contents = append(geminiRequest.Contents, content)

 		// If a system message is the last message, we need to add a dummy model message to make gemini happy
```
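The effect of this hunk: for models listed in ModelsSupportSystemInstruction, an OpenAI `system` message is lifted into Gemini's native system-instruction field instead of being downgraded to a user turn (the older fallback, which still applies to unsupported models). An illustrative request fragment, assuming the ChatRequest serializes the field as `system_instruction`:

```json
{
  "system_instruction": { "parts": [{ "text": "You are a terse assistant." }] },
  "contents": [
    { "role": "user", "parts": [{ "text": "Hi" }] }
  ]
}
```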
```diff
@@ -251,19 +258,52 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
 			if candidate.Content.Parts[0].FunctionCall != nil {
 				choice.Message.ToolCalls = getToolCalls(&candidate)
 			} else {
+				// Handle text and image content
 				var builder strings.Builder
+				var contentItems []model.MessageContent
+
 				for _, part := range candidate.Content.Parts {
-					if i > 0 {
-						builder.WriteString("\n")
+					if part.Text != "" {
+						// For text parts
+						if i > 0 {
+							builder.WriteString("\n")
+						}
+						builder.WriteString(part.Text)
+
+						// Add to content items
+						contentItems = append(contentItems, model.MessageContent{
+							Type: model.ContentTypeText,
+							Text: &part.Text,
+						})
 					}
+
+					if part.InlineData != nil && part.InlineData.MimeType != "" && part.InlineData.Data != "" {
+						// For inline image data
+						imageURL := &model.ImageURL{
+							// The data is already base64 encoded
+							Url: fmt.Sprintf("data:%s;base64,%s", part.InlineData.MimeType, part.InlineData.Data),
+						}
+
+						contentItems = append(contentItems, model.MessageContent{
+							Type:     model.ContentTypeImageURL,
+							ImageURL: imageURL,
+						})
+					}
-					builder.WriteString(part.Text)
 				}
-				choice.Message.Content = builder.String()
+
+				// If we have multiple content types, use structured content format
+				if len(contentItems) > 1 || (len(contentItems) == 1 && contentItems[0].Type != model.ContentTypeText) {
+					choice.Message.Content = contentItems
+				} else {
+					// Otherwise use the simple string content format
+					choice.Message.Content = builder.String()
+				}
 			}
 		} else {
 			choice.Message.Content = ""
 			choice.FinishReason = candidate.FinishReason
 		}

 		fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
 	}
 	return &fullTextResponse
```
```diff
@@ -271,14 +311,78 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {

 func streamResponseGeminiChat2OpenAI(geminiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
 	var choice openai.ChatCompletionsStreamResponseChoice
-	choice.Delta.Content = geminiResponse.GetResponseText()
-	//choice.FinishReason = &constant.StopFinishReason
 	choice.Delta.Role = "assistant"

+	// Check if we have any candidates
+	if len(geminiResponse.Candidates) == 0 {
+		return nil
+	}
+
+	// Get the first candidate
+	candidate := geminiResponse.Candidates[0]
+
+	// Check if there are parts in the content
+	if len(candidate.Content.Parts) == 0 {
+		return nil
+	}
+
+	// Handle different content types in the parts
+	for _, part := range candidate.Content.Parts {
+		// Handle text content
+		if part.Text != "" {
+			// Store as string for simple text responses
+			textContent := part.Text
+			choice.Delta.Content = textContent
+		}
+
+		// Handle image content
+		if part.InlineData != nil && part.InlineData.MimeType != "" && part.InlineData.Data != "" {
+			// Create a structured response for image content
+			imageUrl := fmt.Sprintf("data:%s;base64,%s", part.InlineData.MimeType, part.InlineData.Data)
+
+			// If we already have text content, create a mixed content response
+			if strContent, ok := choice.Delta.Content.(string); ok && strContent != "" {
+				// Convert the existing text content and add the image
+				messageContents := []model.MessageContent{
+					{
+						Type: model.ContentTypeText,
+						Text: &strContent,
+					},
+					{
+						Type: model.ContentTypeImageURL,
+						ImageURL: &model.ImageURL{
+							Url: imageUrl,
+						},
+					},
+				}
+				choice.Delta.Content = messageContents
+			} else {
+				// Only have image content
+				choice.Delta.Content = []model.MessageContent{
+					{
+						Type: model.ContentTypeImageURL,
+						ImageURL: &model.ImageURL{
+							Url: imageUrl,
+						},
+					},
+				}
+			}
+		}
+
+		// Handle function calls (if present)
+		if part.FunctionCall != nil {
+			choice.Delta.ToolCalls = getToolCalls(&candidate)
+		}
+	}
+
+	// Create response
 	var response openai.ChatCompletionsStreamResponse
 	response.Id = fmt.Sprintf("chatcmpl-%s", random.GetUUID())
 	response.Created = helper.GetTimestamp()
 	response.Object = "chat.completion.chunk"
 	response.Model = "gemini"
 	response.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}

 	return &response
 }
```
 | 
			
		||||
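The inline-image branch above folds Gemini's base64 InlineData into an OpenAI-style data URL. A minimal sketch of that conversion (the MIME type and payload here are illustrative):

	// buildDataURL mirrors the fmt.Sprintf call used above.
	func buildDataURL(mimeType, base64Data string) string {
		return fmt.Sprintf("data:%s;base64,%s", mimeType, base64Data)
	}

	// buildDataURL("image/png", "iVBORw0KGgo...") returns
	// "data:image/png;base64,iVBORw0KGgo..."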
@@ -304,17 +408,23 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(bufio.ScanLines)

	buffer := make([]byte, 1024*1024) // 1MB buffer
	scanner.Buffer(buffer, len(buffer))

	common.SetEventStreamHeaders(c)

	for scanner.Scan() {
		data := scanner.Text()
		data = strings.TrimSpace(data)

		if !strings.HasPrefix(data, "data: ") {
			continue
		}
		data = strings.TrimPrefix(data, "data: ")
		data = strings.TrimSuffix(data, "\"")

		fmt.Printf(">> gemini response: %s\n", data)

		var geminiResponse ChatResponse
		err := json.Unmarshal([]byte(data), &geminiResponse)
		if err != nil {
@@ -354,6 +464,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
	if err != nil {
		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}

	err = resp.Body.Close()
	if err != nil {
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 
@@ -1,10 +1,24 @@
package gemini

type ChatRequest struct {
	Contents         []ChatContent        `json:"contents"`
	SafetySettings   []ChatSafetySettings `json:"safety_settings,omitempty"`
	GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools            []ChatTools          `json:"tools,omitempty"`
	Contents          []ChatContent        `json:"contents"`
	SafetySettings    []ChatSafetySettings `json:"safety_settings,omitempty"`
	GenerationConfig  ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools             []ChatTools          `json:"tools,omitempty"`
	SystemInstruction *ChatContent         `json:"system_instruction,omitempty"`
	ModelVersion      string               `json:"model_version,omitempty"`
	UsageMetadata     *UsageMetadata       `json:"usage_metadata,omitempty"`
}

type UsageMetadata struct {
	PromptTokenCount    int                   `json:"promptTokenCount,omitempty"`
	TotalTokenCount     int                   `json:"totalTokenCount,omitempty"`
	PromptTokensDetails []PromptTokensDetails `json:"promptTokensDetails,omitempty"`
}

type PromptTokensDetails struct {
	Modality   string `json:"modality,omitempty"`
	TokenCount int    `json:"tokenCount,omitempty"`
}

type EmbeddingRequest struct {
@@ -65,12 +79,13 @@ type ChatTools struct {
}

type ChatGenerationConfig struct {
	ResponseMimeType string   `json:"responseMimeType,omitempty"`
	ResponseSchema   any      `json:"responseSchema,omitempty"`
	Temperature      *float64 `json:"temperature,omitempty"`
	TopP             *float64 `json:"topP,omitempty"`
	TopK             float64  `json:"topK,omitempty"`
	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
	CandidateCount   int      `json:"candidateCount,omitempty"`
	StopSequences    []string `json:"stopSequences,omitempty"`
	ResponseMimeType   string   `json:"responseMimeType,omitempty"`
	ResponseSchema     any      `json:"responseSchema,omitempty"`
	Temperature        *float64 `json:"temperature,omitempty"`
	TopP               *float64 `json:"topP,omitempty"`
	TopK               float64  `json:"topK,omitempty"`
	MaxOutputTokens    int      `json:"maxOutputTokens,omitempty"`
	CandidateCount     int      `json:"candidateCount,omitempty"`
	StopSequences      []string `json:"stopSequences,omitempty"`
	ResponseModalities []string `json:"responseModalities,omitempty"`
}
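For reference, a ChatGenerationConfig carrying the new ResponseModalities field marshals with the camelCase keys declared above; a minimal sketch (the values are illustrative):

	temp := 0.7
	cfg := ChatGenerationConfig{
		Temperature:        &temp,
		MaxOutputTokens:    1024,
		ResponseModalities: []string{"TEXT", "IMAGE"},
	}
	b, _ := json.Marshal(cfg)
	// {"temperature":0.7,"maxOutputTokens":1024,"responseModalities":["TEXT","IMAGE"]}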
 
39	relay/adaptor/geminiv2/constants.go	Normal file
@@ -0,0 +1,39 @@
package geminiv2

import "strings"

// https://ai.google.dev/models/gemini

var ModelList = []string{
	"gemini-pro", "gemini-1.0-pro",
	"gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
	"gemma-3-27b-it",
	"gemini-1.5-flash", "gemini-1.5-flash-8b",
	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
	"text-embedding-004", "aqa",
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-lite-preview-02-05",
	"gemini-2.0-flash-thinking-exp-01-21",
	"gemini-2.0-flash-exp-image-generation",
	"gemini-2.0-pro-exp-02-05",
	"gemini-2.5-pro-exp-03-25",
}

const (
	ModalityText  = "TEXT"
	ModalityImage = "IMAGE"
)

// GetModelModalities returns the modalities of the model.
func GetModelModalities(model string) []string {
	if strings.Contains(model, "-image-generation") {
		return []string{ModalityText, ModalityImage}
	}

	// Until 2025-03-26, the following models do not accept the responseModalities field
	if model == "gemini-2.5-pro-exp-03-25" {
		return nil
	}

	return []string{ModalityText}
}
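Expected behaviour of GetModelModalities, as a quick sketch:

	GetModelModalities("gemini-2.0-flash-exp-image-generation") // ["TEXT", "IMAGE"]
	GetModelModalities("gemini-2.5-pro-exp-03-25")              // nil (field omitted)
	GetModelModalities("gemini-2.0-flash")                      // ["TEXT"]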
							
								
								
									
14	relay/adaptor/geminiv2/main.go	Normal file
@@ -0,0 +1,14 @@
package geminiv2

import (
	"fmt"
	"strings"

	"github.com/songquanpeng/one-api/relay/meta"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
	baseURL := strings.TrimSuffix(meta.BaseURL, "/")
	requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1")
	return fmt.Sprintf("%s%s", baseURL, requestPath), nil
}
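GetRequestURL only strips the OpenAI-style /v1 prefix and appends the remainder to the channel's base URL; a sketch with an illustrative base URL:

	// meta.BaseURL:        "https://generativelanguage.googleapis.com/v1beta/openai/"
	// meta.RequestURLPath: "/v1/chat/completions"
	url, _ := GetRequestURL(meta)
	// url == "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"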
@@ -1,27 +1,32 @@
package groq

// ModelList is a list of models that can be used with Groq.
//
// https://console.groq.com/docs/models

var ModelList = []string{
	// Regular Models
	"distil-whisper-large-v3-en",
	"gemma2-9b-it",
	"llama-3.1-70b-versatile",
	"llama-3.3-70b-versatile",
	"llama-3.1-8b-instant",
	"llama-3.2-11b-text-preview",
	"llama-3.2-11b-vision-preview",
	"llama-3.2-1b-preview",
	"llama-3.2-3b-preview",
	"llama-3.2-90b-text-preview",
	"llama-3.2-90b-vision-preview",
	"llama-guard-3-8b",
	"llama3-70b-8192",
	"llama3-8b-8192",
	"llama3-groq-70b-8192-tool-use-preview",
	"llama3-groq-8b-8192-tool-use-preview",
	"llava-v1.5-7b-4096-preview",
	"mixtral-8x7b-32768",
	"distil-whisper-large-v3-en",
	"whisper-large-v3",
	"whisper-large-v3-turbo",

	// Preview Models
	"qwen-qwq-32b",
	"mistral-saba-24b",
	"qwen-2.5-coder-32b",
	"qwen-2.5-32b",
	"deepseek-r1-distill-qwen-32b",
	"deepseek-r1-distill-llama-70b-specdec",
	"deepseek-r1-distill-llama-70b",
	"llama-3.2-1b-preview",
	"llama-3.2-3b-preview",
	"llama-3.2-11b-vision-preview",
	"llama-3.2-90b-vision-preview",
	"llama-3.3-70b-specdec",
}
 
@@ -13,7 +13,7 @@ type Adaptor interface {
	GetRequestURL(meta *meta.Meta) (string, error)
	SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	ConvertImageRequest(request *model.ImageRequest) (any, error)
	ConvertImageRequest(c *gin.Context, request *model.ImageRequest) (any, error)
	DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error)
	DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode)
	GetModelList() []string
 
@@ -48,7 +48,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	}
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
 
@@ -43,7 +43,9 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
		for _, part := range openaiContent {
			switch part.Type {
			case model.ContentTypeText:
				contentText = part.Text
				if part.Text != nil {
					contentText = *part.Text
				}
			case model.ContentTypeImageURL:
				_, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
				imageUrls = append(imageUrls, data)
 
@@ -1,19 +1,27 @@
package openai

import (
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/adaptor"
	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
	"github.com/songquanpeng/one-api/relay/adaptor/novita"
	"github.com/songquanpeng/one-api/relay/adaptor/openrouter"
	"github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/channeltype"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
@@ -31,16 +39,24 @@ func (a *Adaptor) Init(meta *meta.Meta) {
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
	switch meta.ChannelType {
	case channeltype.Azure:
		defaultVersion := meta.Config.APIVersion

		// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python#api--feature-support
		if strings.HasPrefix(meta.ActualModelName, "o1") ||
			strings.HasPrefix(meta.ActualModelName, "o3") {
			defaultVersion = "2024-12-01-preview"
		}

		if meta.Mode == relaymode.ImagesGenerations {
			// https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
			// https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2024-03-01-preview
			fullRequestURL := fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.BaseURL, meta.ActualModelName, meta.Config.APIVersion)
			fullRequestURL := fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.BaseURL, meta.ActualModelName, defaultVersion)
			return fullRequestURL, nil
		}

		// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
		requestURL := strings.Split(meta.RequestURLPath, "?")[0]
		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.Config.APIVersion)
		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, defaultVersion)
		task := strings.TrimPrefix(requestURL, "/v1/")
		model_ := meta.ActualModelName
		model_ = strings.Replace(model_, ".", "", -1)
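Effect of the api-version override above, sketched with illustrative values (the resource name is hypothetical):

	// meta.Config.APIVersion = "2024-06-01", meta.ActualModelName = "o3-mini"
	// defaultVersion becomes "2024-12-01-preview", so the deployment URL resolves roughly to:
	url := fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=%s",
		"https://myresource.openai.azure.com", "o3-mini", "2024-12-01-preview")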
@@ -56,6 +72,10 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
		return novita.GetRequestURL(meta)
	case channeltype.BaiduV2:
		return baiduv2.GetRequestURL(meta)
	case channeltype.AliBailian:
		return alibailian.GetRequestURL(meta)
	case channeltype.GeminiOpenAICompatible:
		return geminiv2.GetRequestURL(meta)
	default:
		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
	}
@@ -79,28 +99,92 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	if request == nil {
		return nil, errors.New("request is nil")
	}
	if request.Stream {

	meta := meta.GetByContext(c)
	switch meta.ChannelType {
	case channeltype.OpenRouter:
		includeReasoning := true
		request.IncludeReasoning = &includeReasoning
		if request.Provider == nil || request.Provider.Sort == "" &&
			config.OpenrouterProviderSort != "" {
			if request.Provider == nil {
				request.Provider = &openrouter.RequestProvider{}
			}

			request.Provider.Sort = config.OpenrouterProviderSort
		}
	default:
	}

	if request.Stream && !config.EnforceIncludeUsage {
		logger.Warn(c.Request.Context(),
			"please set ENFORCE_INCLUDE_USAGE=true to ensure accurate billing in stream mode")
	}

	if config.EnforceIncludeUsage && request.Stream {
		// always return usage in stream mode
		if request.StreamOptions == nil {
			request.StreamOptions = &model.StreamOptions{}
		}
		request.StreamOptions.IncludeUsage = true
	}

	// o1/o1-mini/o1-preview do not support system prompt/max_tokens/temperature
	if strings.HasPrefix(meta.ActualModelName, "o1") ||
		strings.HasPrefix(meta.ActualModelName, "o3") {
		temperature := float64(1)
		request.Temperature = &temperature // Only the default (1) value is supported

		request.MaxTokens = 0
		request.Messages = func(raw []model.Message) (filtered []model.Message) {
			for i := range raw {
				if raw[i].Role != "system" {
					filtered = append(filtered, raw[i])
				}
			}

			return
		}(request.Messages)
	}

	// web search do not support system prompt/max_tokens/temperature
	if strings.HasPrefix(meta.ActualModelName, "gpt-4o-search") ||
		strings.HasPrefix(meta.ActualModelName, "gpt-4o-mini-search") {
		request.Temperature = nil
		request.TopP = nil
		request.PresencePenalty = nil
		request.N = nil
		request.FrequencyPenalty = nil
	}

	if request.Stream && !config.EnforceIncludeUsage &&
		(strings.HasPrefix(request.Model, "gpt-4o-audio") ||
			strings.HasPrefix(request.Model, "gpt-4o-mini-audio")) {
		// TODO: Since it is not clear how to implement billing in stream mode,
		// it is temporarily not supported
		return nil, errors.New("set ENFORCE_INCLUDE_USAGE=true to enable stream mode for gpt-4o-audio")
	}

	return request, nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	return request, nil
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
func (a *Adaptor) DoRequest(c *gin.Context,
	meta *meta.Meta,
	requestBody io.Reader) (*http.Response, error) {
	return adaptor.DoRequestHelper(a, c, meta, requestBody)
}

func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
func (a *Adaptor) DoResponse(c *gin.Context,
	resp *http.Response,
	meta *meta.Meta) (usage *model.Usage,
	err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		var responseText string
		err, responseText, usage = StreamHandler(c, resp, meta.Mode)
@@ -115,10 +199,61 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
		switch meta.Mode {
		case relaymode.ImagesGenerations:
			err, _ = ImageHandler(c, resp)
		case relaymode.ImagesEdits:
			err, _ = ImagesEditsHandler(c, resp)
		default:
			err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
		}
	}

	// -------------------------------------
	// calculate web-search tool cost
	// -------------------------------------
	if usage != nil {
		searchContextSize := "medium"
		var req *model.GeneralOpenAIRequest
		if vi, ok := c.Get(ctxkey.ConvertedRequest); ok {
			if req, ok = vi.(*model.GeneralOpenAIRequest); ok {
				if req != nil &&
					req.WebSearchOptions != nil &&
					req.WebSearchOptions.SearchContextSize != nil {
					searchContextSize = *req.WebSearchOptions.SearchContextSize
				}

				switch {
				case strings.HasPrefix(meta.ActualModelName, "gpt-4o-search"):
					switch searchContextSize {
					case "low":
						// float literals: an untyped integer 30 / 1000 would be constant-folded to zero
						usage.ToolsCost += int64(math.Ceil(30.0 / 1000 * ratio.QuotaPerUsd))
					case "medium":
						usage.ToolsCost += int64(math.Ceil(35.0 / 1000 * ratio.QuotaPerUsd))
					case "high":
						usage.ToolsCost += int64(math.Ceil(40.0 / 1000 * ratio.QuotaPerUsd))
					default:
						return nil, ErrorWrapper(
							errors.Errorf("invalid search context size %q", searchContextSize),
							"invalid search context size: "+searchContextSize,
							http.StatusBadRequest)
					}
				case strings.HasPrefix(meta.ActualModelName, "gpt-4o-mini-search"):
					switch searchContextSize {
					case "low":
						usage.ToolsCost += int64(math.Ceil(25.0 / 1000 * ratio.QuotaPerUsd))
					case "medium":
						usage.ToolsCost += int64(math.Ceil(27.5 / 1000 * ratio.QuotaPerUsd))
					case "high":
						usage.ToolsCost += int64(math.Ceil(30.0 / 1000 * ratio.QuotaPerUsd))
					default:
						return nil, ErrorWrapper(
							errors.Errorf("invalid search context size %q", searchContextSize),
							"invalid search context size: "+searchContextSize,
							http.StatusBadRequest)
					}
				}
			}
		}
	}

	return
}
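A worked example of the web-search surcharge above, assuming ratio.QuotaPerUsd converts dollars to quota (500000 quota per USD is an illustrative value): a gpt-4o-search call with search_context_size "medium" is billed at $35 per 1000 calls, so each call adds

	toolsCost := int64(math.Ceil(35.0 / 1000 * 500000)) // 17500 quota per call

to usage.ToolsCost. The float literals matter: with untyped integer constants, Go folds 30 / 1000 to zero before the multiplication.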

 
@@ -2,10 +2,12 @@ package openai

import (
	"github.com/songquanpeng/one-api/relay/adaptor/ai360"
	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/adaptor/baichuan"
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/deepseek"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/groq"
	"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
@@ -79,6 +81,10 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
		return "xunfeiv2", xunfeiv2.ModelList
	case channeltype.OpenRouter:
		return "openrouter", openrouter.ModelList
	case channeltype.AliBailian:
		return "alibailian", alibailian.ModelList
	case channeltype.GeminiOpenAICompatible:
		return "geminiv2", geminiv2.ModelList
	default:
		return "openai", ModelList
	}
 
@@ -7,11 +7,10 @@ var ModelList = []string{
	"gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
	"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
	"gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
	"gpt-4o", "gpt-4o-2024-05-13",
	"gpt-4o-2024-08-06",
	"gpt-4o-2024-11-20",
	"chatgpt-4o-latest",
	"gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "chatgpt-4o-latest",
	"gpt-4o-mini", "gpt-4o-mini-2024-07-18",
	"gpt-4o-mini-audio-preview", "gpt-4o-mini-audio-preview-2024-12-17",
	"gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-audio-preview-2024-10-01",
	"gpt-4-vision-preview",
	"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
	"text-curie-001", "text-babbage-001", "text-ada-001", "text-davinci-002", "text-davinci-003",
@@ -24,4 +23,8 @@ var ModelList = []string{
	"o1", "o1-2024-12-17",
	"o1-preview", "o1-preview-2024-09-12",
	"o1-mini", "o1-mini-2024-09-12",
	"o3-mini", "o3-mini-2025-01-31",
	"gpt-4.5-preview", "gpt-4.5-preview-2025-02-27",
	// https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat
	"gpt-4o-search-preview", "gpt-4o-mini-search-preview",
}
 
@@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int)
}

func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
	if channelType == channeltype.OpenAICompatible {
		return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1"))
	}
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
 
@@ -3,12 +3,30 @@ package openai
import (
	"bytes"
	"encoding/json"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
)

// ImagesEditsHandler just copies the response body to the client
//
// https://platform.openai.com/docs/api-reference/images/createEdit
func ImagesEditsHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	c.Writer.WriteHeader(resp.StatusCode)
	for k, v := range resp.Header {
		c.Writer.Header().Set(k, v[0])
	}

	if _, err := io.Copy(c.Writer, resp.Body); err != nil {
		return ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
	}
	defer resp.Body.Close()

	return nil, nil
}

func ImageHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
	var imageResponse ImageResponse
	responseBody, err := io.ReadAll(resp.Body)
 
@@ -5,15 +5,16 @@ import (
	"bytes"
	"encoding/json"
	"io"
	"math"
	"net/http"
	"strings"

	"github.com/songquanpeng/one-api/common/render"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/conv"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/common/render"
	"github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/relaymode"
)
@@ -24,128 +25,300 @@ const (
	dataPrefixLength = len(dataPrefix)
)

// StreamHandler processes streaming responses from OpenAI API
// It handles incremental content delivery and accumulates the final response text
// Returns error (if any), accumulated response text, and token usage information
func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
	// Initialize accumulators for the response
	responseText := ""
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(bufio.ScanLines)
	reasoningText := ""
	var usage *model.Usage

	// Set up scanner for reading the stream line by line
	scanner := bufio.NewScanner(resp.Body)
	buffer := make([]byte, 256*1024) // 256KB buffer for large messages
	scanner.Buffer(buffer, len(buffer))
	scanner.Split(bufio.ScanLines)

	// Set response headers for SSE
	common.SetEventStreamHeaders(c)

	doneRendered := false

	// Process each line from the stream
	for scanner.Scan() {
		data := scanner.Text()
		if len(data) < dataPrefixLength { // ignore blank line or wrong format
			continue
		data := NormalizeDataLine(scanner.Text())

		// logger.Debugf(c.Request.Context(), "stream response: %s", data)

		// Skip lines that don't match expected format
		if len(data) < dataPrefixLength {
			continue // Ignore blank line or wrong format
		}

		// Verify line starts with expected prefix
		if data[:dataPrefixLength] != dataPrefix && data[:dataPrefixLength] != done {
			continue
		}

		// Check for stream termination
		if strings.HasPrefix(data[dataPrefixLength:], done) {
			render.StringData(c, data)
			doneRendered = true
			continue
		}

		// Process based on relay mode
		switch relayMode {
		case relaymode.ChatCompletions:
			var streamResponse ChatCompletionsStreamResponse

			// Parse the JSON response
			err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
			if err != nil {
				logger.SysError("error unmarshalling stream response: " + err.Error())
				render.StringData(c, data) // if error happened, pass the data to client
				continue                   // just ignore the error
				logger.Errorf(c.Request.Context(), "unmarshalling stream data %q got %+v", data, err)
				render.StringData(c, data) // Pass raw data to client if parsing fails
				continue
			}

			// Skip empty choices (Azure specific behavior)
			if len(streamResponse.Choices) == 0 && streamResponse.Usage == nil {
				// but for empty choice and no usage, we should not pass it to client, this is for azure
				continue // just ignore empty choice
				continue
			}
			render.StringData(c, data)

			// Process each choice in the response
			for _, choice := range streamResponse.Choices {
				// Extract reasoning content from different possible fields
				currentReasoningChunk := extractReasoningContent(&choice.Delta)

				// Update accumulated reasoning text
				if currentReasoningChunk != "" {
					reasoningText += currentReasoningChunk
				}

				// Set the reasoning content in the format requested by client
				choice.Delta.SetReasoningContent(c.Query("reasoning_format"), currentReasoningChunk)

				// Accumulate response content
				responseText += conv.AsString(choice.Delta.Content)
			}

			// Send the processed data to the client
			render.StringData(c, data)

			// Update usage information if available
			if streamResponse.Usage != nil {
				usage = streamResponse.Usage
			}

		case relaymode.Completions:
			// Send the data immediately for Completions mode
			render.StringData(c, data)

			var streamResponse CompletionsStreamResponse
			err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
			if err != nil {
				logger.SysError("error unmarshalling stream response: " + err.Error())
				continue
			}

			// Accumulate text from all choices
			for _, choice := range streamResponse.Choices {
				responseText += choice.Text
			}
		}
	}

	// Check for scanner errors
	if err := scanner.Err(); err != nil {
		logger.SysError("error reading stream: " + err.Error())
	}

	// Ensure stream termination is sent to client
	if !doneRendered {
		render.Done(c)
	}

	err := resp.Body.Close()
	if err != nil {
	// Clean up resources
	if err := resp.Body.Close(); err != nil {
		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "", nil
	}

	return nil, responseText, usage
	// Return the complete response text (reasoning + content) and usage
	return nil, reasoningText + responseText, usage
}

// Helper function to extract reasoning content from message delta
func extractReasoningContent(delta *model.Message) string {
	content := ""

	// Extract reasoning from different possible fields
	if delta.Reasoning != nil {
		content += *delta.Reasoning
		delta.Reasoning = nil
	}

	if delta.ReasoningContent != nil {
		content += *delta.ReasoningContent
		delta.ReasoningContent = nil
	}

	return content
}

// Handler processes non-streaming responses from OpenAI API
// Returns error (if any) and token usage information
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
	var textResponse SlimTextResponse
	// Read the entire response body
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {

	// Close the original response body
	if err = resp.Body.Close(); err != nil {
		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	err = json.Unmarshal(responseBody, &textResponse)
	if err != nil {

	// Parse the response JSON
	var textResponse SlimTextResponse
	if err = json.Unmarshal(responseBody, &textResponse); err != nil {
		return ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}

	// Check for API errors
	if textResponse.Error.Type != "" {
		return &model.ErrorWithStatusCode{
			Error:      textResponse.Error,
			StatusCode: resp.StatusCode,
		}, nil
	}
	// Reset response body
	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))

	// We shouldn't set the header before we parse the response body, because the parse part may fail.
	// And then we will have to send an error response, but in this case, the header has already been set.
	// So the HTTPClient will be confused by the response.
	// For example, Postman will report error, and we cannot check the response at all.
	for k, v := range resp.Header {
		c.Writer.Header().Set(k, v[0])
	// Process reasoning content in each choice
	for _, msg := range textResponse.Choices {
		reasoningContent := processReasoningContent(&msg)

		// Set reasoning in requested format if content exists
		if reasoningContent != "" {
			msg.SetReasoningContent(c.Query("reasoning_format"), reasoningContent)
		}
	}

	// Reset response body for forwarding to client
	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
	logger.Debugf(c.Request.Context(), "handler response: %s", string(responseBody))

	// Forward all response headers (not just first value of each)
	for k, values := range resp.Header {
		for _, v := range values {
			c.Writer.Header().Add(k, v)
		}
	}

	// Set response status and copy body to client
	c.Writer.WriteHeader(resp.StatusCode)
	_, err = io.Copy(c.Writer, resp.Body)
	if err != nil {
	if _, err = io.Copy(c.Writer, resp.Body); err != nil {
		return ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {

	// Close the reset body
	if err = resp.Body.Close(); err != nil {
		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}

	if textResponse.Usage.TotalTokens == 0 || (textResponse.Usage.PromptTokens == 0 && textResponse.Usage.CompletionTokens == 0) {
	// Calculate token usage if not provided by API
	calculateTokenUsage(&textResponse, promptTokens, modelName)

	return nil, &textResponse.Usage
}

// processReasoningContent is a helper function to extract and process reasoning content from the message
func processReasoningContent(msg *TextResponseChoice) string {
	var reasoningContent string

	// Check different locations for reasoning content
	switch {
	case msg.Reasoning != nil:
		reasoningContent = *msg.Reasoning
		msg.Reasoning = nil
	case msg.ReasoningContent != nil:
		reasoningContent = *msg.ReasoningContent
		msg.ReasoningContent = nil
	case msg.Message.Reasoning != nil:
		reasoningContent = *msg.Message.Reasoning
		msg.Message.Reasoning = nil
	case msg.Message.ReasoningContent != nil:
		reasoningContent = *msg.Message.ReasoningContent
		msg.Message.ReasoningContent = nil
	}

	return reasoningContent
}

// Helper function to calculate token usage
func calculateTokenUsage(response *SlimTextResponse, promptTokens int, modelName string) {
	// Calculate tokens if not provided by the API
	if response.Usage.TotalTokens == 0 ||
		(response.Usage.PromptTokens == 0 && response.Usage.CompletionTokens == 0) {

		completionTokens := 0
		for _, choice := range textResponse.Choices {
		for _, choice := range response.Choices {
			// Count content tokens
			completionTokens += CountTokenText(choice.Message.StringContent(), modelName)

			// Count reasoning tokens in all possible locations
			if choice.Message.Reasoning != nil {
				completionTokens += CountToken(*choice.Message.Reasoning)
			}
			if choice.Message.ReasoningContent != nil {
				completionTokens += CountToken(*choice.Message.ReasoningContent)
			}
			if choice.Reasoning != nil {
				completionTokens += CountToken(*choice.Reasoning)
			}
			if choice.ReasoningContent != nil {
				completionTokens += CountToken(*choice.ReasoningContent)
			}
		}
		textResponse.Usage = model.Usage{

		// Set usage values
		response.Usage = model.Usage{
			PromptTokens:     promptTokens,
			CompletionTokens: completionTokens,
			TotalTokens:      promptTokens + completionTokens,
		}
	} else if hasAudioTokens(response) {
		// Handle audio tokens conversion
		calculateAudioTokens(response, modelName)
	}
	return nil, &textResponse.Usage
}

// Helper function to check if response has audio tokens
func hasAudioTokens(response *SlimTextResponse) bool {
	return (response.PromptTokensDetails != nil && response.PromptTokensDetails.AudioTokens > 0) ||
		(response.CompletionTokensDetails != nil && response.CompletionTokensDetails.AudioTokens > 0)
}

// Helper function to calculate audio token usage
func calculateAudioTokens(response *SlimTextResponse, modelName string) {
	// Convert audio tokens for prompt
	if response.PromptTokensDetails != nil {
		response.Usage.PromptTokens = response.PromptTokensDetails.TextTokens +
			int(math.Ceil(
				float64(response.PromptTokensDetails.AudioTokens)*
					ratio.GetAudioPromptRatio(modelName),
			))
	}

	// Convert audio tokens for completion
	if response.CompletionTokensDetails != nil {
		response.Usage.CompletionTokens = response.CompletionTokensDetails.TextTokens +
			int(math.Ceil(
				float64(response.CompletionTokensDetails.AudioTokens)*
					ratio.GetAudioPromptRatio(modelName)*ratio.GetAudioCompletionRatio(modelName),
			))
	}

	// Calculate total tokens
	response.Usage.TotalTokens = response.Usage.PromptTokens + response.Usage.CompletionTokens
}
 
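A worked example of the audio conversion above, with illustrative numbers: if the API reports 20 text prompt tokens and 100 audio prompt tokens, and ratio.GetAudioPromptRatio returns 8 for the model (an assumed value), the billed prompt tokens become

	promptTokens := 20 + int(math.Ceil(float64(100)*8.0)) // 820

Completion-side audio tokens are additionally scaled by the model's audio completion ratio.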
@@ -1,6 +1,10 @@
package openai

import "github.com/songquanpeng/one-api/relay/model"
import (
	"mime/multipart"

	"github.com/songquanpeng/one-api/relay/model"
)

type TextContent struct {
	Type string `json:"type,omitempty"`
@@ -71,6 +75,24 @@ type TextToSpeechRequest struct {
	ResponseFormat string  `json:"response_format"`
}

type AudioTranscriptionRequest struct {
	File                 *multipart.FileHeader `form:"file" binding:"required"`
	Model                string                `form:"model" binding:"required"`
	Language             string                `form:"language"`
	Prompt               string                `form:"prompt"`
	ResponseFormat       string                `form:"response_format" binding:"oneof=json text srt verbose_json vtt"`
	Temperature          float64               `form:"temperature"`
	TimestampGranularity []string              `form:"timestamp_granularity"`
}

type AudioTranslationRequest struct {
	File           *multipart.FileHeader `form:"file" binding:"required"`
	Model          string                `form:"model" binding:"required"`
	Prompt         string                `form:"prompt"`
	ResponseFormat string                `form:"response_format" binding:"oneof=json text srt verbose_json vtt"`
	Temperature    float64               `form:"temperature"`
}

type UsageOrResponseText struct {
	*model.Usage
	ResponseText string
@@ -110,12 +132,14 @@ type EmbeddingResponse struct {
	model.Usage `json:"usage"`
}

// ImageData represents an image in the response
type ImageData struct {
	Url           string `json:"url,omitempty"`
	B64Json       string `json:"b64_json,omitempty"`
	RevisedPrompt string `json:"revised_prompt,omitempty"`
}

// ImageResponse represents the response structure for image generations
type ImageResponse struct {
	Created int64       `json:"created"`
	Data    []ImageData `json:"data"`
 
@@ -1,16 +1,20 @@
package openai

import (
	"errors"
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"math"
	"strings"

	"github.com/pkg/errors"
	"github.com/pkoukk/tiktoken-go"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/image"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/billing/ratio"
	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/model"
)
@@ -73,8 +77,10 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
	return len(tokenEncoder.Encode(text, nil, nil))
}

func CountTokenMessages(messages []model.Message, model string) int {
	tokenEncoder := getTokenEncoder(model)
// CountTokenMessages counts the number of tokens in a list of messages.
func CountTokenMessages(ctx context.Context,
	messages []model.Message, actualModel string) int {
	tokenEncoder := getTokenEncoder(actualModel)
	// Reference:
	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	// https://github.com/pkoukk/tiktoken-go/issues/6
@@ -82,47 +88,54 @@ func CountTokenMessages(messages []model.Message, model string) int {
	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
	var tokensPerMessage int
	var tokensPerName int
	if model == "gpt-3.5-turbo-0301" {
	if actualModel == "gpt-3.5-turbo-0301" {
		tokensPerMessage = 4
		tokensPerName = -1 // If there's a name, the role is omitted
	} else {
		tokensPerMessage = 3
		tokensPerName = 1
	}

	tokenNum := 0
	var totalAudioTokens float64
	for _, message := range messages {
		tokenNum += tokensPerMessage
		switch v := message.Content.(type) {
		case string:
			tokenNum += getTokenNum(tokenEncoder, v)
		case []any:
			for _, it := range v {
				m := it.(map[string]any)
				switch m["type"] {
				case "text":
					if textValue, ok := m["text"]; ok {
						if textString, ok := textValue.(string); ok {
							tokenNum += getTokenNum(tokenEncoder, textString)
						}
					}
				case "image_url":
					imageUrl, ok := m["image_url"].(map[string]any)
					if ok {
						url := imageUrl["url"].(string)
						detail := ""
						if imageUrl["detail"] != nil {
							detail = imageUrl["detail"].(string)
						}
						imageTokens, err := countImageTokens(url, detail, model)
						if err != nil {
							logger.SysError("error counting image tokens: " + err.Error())
						} else {
							tokenNum += imageTokens
						}
					}
		contents := message.ParseContent()
		for _, content := range contents {
			switch content.Type {
			case model.ContentTypeText:
				if content.Text != nil {
					tokenNum += getTokenNum(tokenEncoder, *content.Text)
				}
			case model.ContentTypeImageURL:
				imageTokens, err := countImageTokens(
					content.ImageURL.Url,
					content.ImageURL.Detail,
					actualModel)
				if err != nil {
					logger.SysError("error counting image tokens: " + err.Error())
				} else {
					tokenNum += imageTokens
				}
			case model.ContentTypeInputAudio:
				audioData, err := base64.StdEncoding.DecodeString(content.InputAudio.Data)
				if err != nil {
					logger.SysError("error decoding audio data: " + err.Error())
				}

				audioTokens, err := helper.GetAudioTokens(ctx,
					bytes.NewReader(audioData),
					ratio.GetAudioPromptTokensPerSecond(actualModel))
				if err != nil {
					logger.SysError("error counting audio tokens: " + err.Error())
				} else {
					totalAudioTokens += audioTokens
				}
			}
		}

		tokenNum += int(math.Ceil(totalAudioTokens))

		tokenNum += getTokenNum(tokenEncoder, message.Role)
		if message.Name != nil {
			tokenNum += tokensPerName
 
@@ -3,6 +3,7 @@ package openai
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/songquanpeng/one-api/common/logger"
 | 
			
		||||
	"github.com/songquanpeng/one-api/relay/model"
 | 
			
		||||
@@ -21,3 +22,11 @@ func ErrorWrapper(err error, code string, statusCode int) *model.ErrorWithStatus
 | 
			
		||||
		StatusCode: statusCode,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func NormalizeDataLine(data string) string {
 | 
			
		||||
	if strings.HasPrefix(data, "data:") {
 | 
			
		||||
		content := strings.TrimLeft(data[len("data:"):], " ")
 | 
			
		||||
		return "data: " + content
 | 
			
		||||
	}
 | 
			
		||||
	return data
 | 
			
		||||
}
 | 
			
		||||
 
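Since the hunk above adds NormalizeDataLine without a caller, here is a small self-contained check of what it does to server-sent-event lines (the function body is copied from the hunk; the sample inputs are made up):

package main

import (
	"fmt"
	"strings"
)

func NormalizeDataLine(data string) string {
	if strings.HasPrefix(data, "data:") {
		content := strings.TrimLeft(data[len("data:"):], " ")
		return "data: " + content
	}
	return data
}

func main() {
	fmt.Println(NormalizeDataLine("data:{\"id\":1}")) // data: {"id":1}
	fmt.Println(NormalizeDataLine("data:   [DONE]"))  // data: [DONE]
	fmt.Println(NormalizeDataLine(": keep-alive"))    // non-data lines pass through unchanged
}
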
@@ -1,20 +1,235 @@
package openrouter

var ModelList = []string{
	"openai/gpt-3.5-turbo",
	"openai/chatgpt-4o-latest",
	"openai/o1",
	"openai/o1-preview",
	"openai/o1-mini",
	"openai/o3-mini",
	"google/gemini-2.0-flash-001",
	"google/gemini-2.0-flash-thinking-exp:free",
	"google/gemini-2.0-flash-lite-preview-02-05:free",
	"google/gemini-2.0-pro-exp-02-05:free",
	"google/gemini-flash-1.5-8b",
	"anthropic/claude-3.5-sonnet",
	"01-ai/yi-large",
	"aetherwiing/mn-starcannon-12b",
	"ai21/jamba-1-5-large",
	"ai21/jamba-1-5-mini",
	"ai21/jamba-instruct",
	"aion-labs/aion-1.0",
	"aion-labs/aion-1.0-mini",
	"aion-labs/aion-rp-llama-3.1-8b",
	"allenai/llama-3.1-tulu-3-405b",
	"alpindale/goliath-120b",
	"alpindale/magnum-72b",
	"amazon/nova-lite-v1",
	"amazon/nova-micro-v1",
	"amazon/nova-pro-v1",
	"anthracite-org/magnum-v2-72b",
	"anthracite-org/magnum-v4-72b",
	"anthropic/claude-2",
	"anthropic/claude-2.0",
	"anthropic/claude-2.0:beta",
	"anthropic/claude-2.1",
	"anthropic/claude-2.1:beta",
	"anthropic/claude-2:beta",
	"anthropic/claude-3-haiku",
	"anthropic/claude-3-haiku:beta",
	"anthropic/claude-3-opus",
	"anthropic/claude-3-opus:beta",
	"anthropic/claude-3-sonnet",
	"anthropic/claude-3-sonnet:beta",
	"anthropic/claude-3.5-haiku",
	"deepseek/deepseek-r1:free",
	"anthropic/claude-3.5-haiku-20241022",
	"anthropic/claude-3.5-haiku-20241022:beta",
	"anthropic/claude-3.5-haiku:beta",
	"anthropic/claude-3.5-sonnet",
	"anthropic/claude-3.5-sonnet-20240620",
	"anthropic/claude-3.5-sonnet-20240620:beta",
	"anthropic/claude-3.5-sonnet:beta",
	"cognitivecomputations/dolphin-mixtral-8x22b",
	"cognitivecomputations/dolphin-mixtral-8x7b",
	"cohere/command",
	"cohere/command-r",
	"cohere/command-r-03-2024",
	"cohere/command-r-08-2024",
	"cohere/command-r-plus",
	"cohere/command-r-plus-04-2024",
	"cohere/command-r-plus-08-2024",
	"cohere/command-r7b-12-2024",
	"databricks/dbrx-instruct",
	"deepseek/deepseek-chat",
	"deepseek/deepseek-chat-v2.5",
	"deepseek/deepseek-chat:free",
	"deepseek/deepseek-r1",
	"deepseek/deepseek-r1-distill-llama-70b",
	"deepseek/deepseek-r1-distill-llama-70b:free",
	"deepseek/deepseek-r1-distill-llama-8b",
	"deepseek/deepseek-r1-distill-qwen-1.5b",
	"deepseek/deepseek-r1-distill-qwen-14b",
	"deepseek/deepseek-r1-distill-qwen-32b",
	"deepseek/deepseek-r1:free",
	"eva-unit-01/eva-llama-3.33-70b",
	"eva-unit-01/eva-qwen-2.5-32b",
	"eva-unit-01/eva-qwen-2.5-72b",
	"google/gemini-2.0-flash-001",
	"google/gemini-2.0-flash-exp:free",
	"google/gemini-2.0-flash-lite-preview-02-05:free",
	"google/gemini-2.0-flash-thinking-exp-1219:free",
	"google/gemini-2.0-flash-thinking-exp:free",
	"google/gemini-2.0-pro-exp-02-05:free",
	"google/gemini-exp-1206:free",
	"google/gemini-flash-1.5",
	"google/gemini-flash-1.5-8b",
	"google/gemini-flash-1.5-8b-exp",
	"google/gemini-pro",
	"google/gemini-pro-1.5",
	"google/gemini-pro-vision",
	"google/gemma-2-27b-it",
	"google/gemma-2-9b-it",
	"google/gemma-2-9b-it:free",
	"google/gemma-7b-it",
	"google/learnlm-1.5-pro-experimental:free",
	"google/palm-2-chat-bison",
	"google/palm-2-chat-bison-32k",
	"google/palm-2-codechat-bison",
	"google/palm-2-codechat-bison-32k",
	"gryphe/mythomax-l2-13b",
	"gryphe/mythomax-l2-13b:free",
	"huggingfaceh4/zephyr-7b-beta:free",
	"infermatic/mn-inferor-12b",
	"inflection/inflection-3-pi",
	"inflection/inflection-3-productivity",
	"jondurbin/airoboros-l2-70b",
	"liquid/lfm-3b",
	"liquid/lfm-40b",
	"liquid/lfm-7b",
	"mancer/weaver",
	"meta-llama/llama-2-13b-chat",
	"meta-llama/llama-2-70b-chat",
	"meta-llama/llama-3-70b-instruct",
	"meta-llama/llama-3-8b-instruct",
	"meta-llama/llama-3-8b-instruct:free",
	"meta-llama/llama-3.1-405b",
	"meta-llama/llama-3.1-405b-instruct",
	"meta-llama/llama-3.1-70b-instruct",
	"meta-llama/llama-3.1-8b-instruct",
	"meta-llama/llama-3.2-11b-vision-instruct",
	"meta-llama/llama-3.2-11b-vision-instruct:free",
	"meta-llama/llama-3.2-1b-instruct",
	"meta-llama/llama-3.2-3b-instruct",
	"meta-llama/llama-3.2-90b-vision-instruct",
	"meta-llama/llama-3.3-70b-instruct",
	"meta-llama/llama-3.3-70b-instruct:free",
	"meta-llama/llama-guard-2-8b",
	"microsoft/phi-3-medium-128k-instruct",
	"microsoft/phi-3-medium-128k-instruct:free",
	"microsoft/phi-3-mini-128k-instruct",
	"microsoft/phi-3-mini-128k-instruct:free",
	"microsoft/phi-3.5-mini-128k-instruct",
	"microsoft/phi-4",
	"microsoft/wizardlm-2-7b",
	"microsoft/wizardlm-2-8x22b",
	"minimax/minimax-01",
	"mistralai/codestral-2501",
	"mistralai/codestral-mamba",
	"mistralai/ministral-3b",
	"mistralai/ministral-8b",
	"mistralai/mistral-7b-instruct",
	"mistralai/mistral-7b-instruct-v0.1",
	"mistralai/mistral-7b-instruct-v0.3",
	"mistralai/mistral-7b-instruct:free",
	"mistralai/mistral-large",
	"mistralai/mistral-large-2407",
	"mistralai/mistral-large-2411",
	"mistralai/mistral-medium",
	"mistralai/mistral-nemo",
	"mistralai/mistral-nemo:free",
	"mistralai/mistral-small",
	"mistralai/mistral-small-24b-instruct-2501",
	"mistralai/mistral-small-24b-instruct-2501:free",
	"mistralai/mistral-tiny",
	"mistralai/mixtral-8x22b-instruct",
	"mistralai/mixtral-8x7b",
	"mistralai/mixtral-8x7b-instruct",
	"mistralai/pixtral-12b",
	"mistralai/pixtral-large-2411",
	"neversleep/llama-3-lumimaid-70b",
	"neversleep/llama-3-lumimaid-8b",
	"neversleep/llama-3-lumimaid-8b:extended",
	"neversleep/llama-3.1-lumimaid-70b",
	"neversleep/llama-3.1-lumimaid-8b",
	"neversleep/noromaid-20b",
	"nothingiisreal/mn-celeste-12b",
	"nousresearch/hermes-2-pro-llama-3-8b",
	"nousresearch/hermes-3-llama-3.1-405b",
	"nousresearch/hermes-3-llama-3.1-70b",
	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
	"nousresearch/nous-hermes-llama2-13b",
	"nvidia/llama-3.1-nemotron-70b-instruct",
	"nvidia/llama-3.1-nemotron-70b-instruct:free",
	"openai/chatgpt-4o-latest",
	"openai/gpt-3.5-turbo",
	"openai/gpt-3.5-turbo-0125",
	"openai/gpt-3.5-turbo-0613",
	"openai/gpt-3.5-turbo-1106",
	"openai/gpt-3.5-turbo-16k",
	"openai/gpt-3.5-turbo-instruct",
	"openai/gpt-4",
	"openai/gpt-4-0314",
	"openai/gpt-4-1106-preview",
	"openai/gpt-4-32k",
	"openai/gpt-4-32k-0314",
	"openai/gpt-4-turbo",
	"openai/gpt-4-turbo-preview",
	"openai/gpt-4o",
	"openai/gpt-4o-2024-05-13",
	"openai/gpt-4o-2024-08-06",
	"openai/gpt-4o-2024-11-20",
	"openai/gpt-4o-mini",
	"openai/gpt-4o-mini-2024-07-18",
	"openai/gpt-4o:extended",
	"openai/o1",
	"openai/o1-mini",
	"openai/o1-mini-2024-09-12",
	"openai/o1-preview",
	"openai/o1-preview-2024-09-12",
	"openai/o3-mini",
	"openai/o3-mini-high",
	"openchat/openchat-7b",
	"openchat/openchat-7b:free",
	"openrouter/auto",
	"perplexity/llama-3.1-sonar-huge-128k-online",
	"perplexity/llama-3.1-sonar-large-128k-chat",
	"perplexity/llama-3.1-sonar-large-128k-online",
	"perplexity/llama-3.1-sonar-small-128k-chat",
	"perplexity/llama-3.1-sonar-small-128k-online",
	"perplexity/sonar",
	"perplexity/sonar-reasoning",
	"pygmalionai/mythalion-13b",
	"qwen/qvq-72b-preview",
	"qwen/qwen-2-72b-instruct",
	"qwen/qwen-2-7b-instruct",
	"qwen/qwen-2-7b-instruct:free",
	"qwen/qwen-2-vl-72b-instruct",
	"qwen/qwen-2-vl-7b-instruct",
	"qwen/qwen-2.5-72b-instruct",
	"qwen/qwen-2.5-7b-instruct",
	"qwen/qwen-2.5-coder-32b-instruct",
	"qwen/qwen-max",
	"qwen/qwen-plus",
	"qwen/qwen-turbo",
	"qwen/qwen-vl-plus:free",
	"qwen/qwen2.5-vl-72b-instruct:free",
	"qwen/qwq-32b-preview",
	"raifle/sorcererlm-8x22b",
	"sao10k/fimbulvetr-11b-v2",
	"sao10k/l3-euryale-70b",
	"sao10k/l3-lunaris-8b",
	"sao10k/l3.1-70b-hanami-x1",
	"sao10k/l3.1-euryale-70b",
	"sao10k/l3.3-euryale-70b",
	"sophosympatheia/midnight-rose-70b",
	"sophosympatheia/rogue-rose-103b-v0.2:free",
	"teknium/openhermes-2.5-mistral-7b",
	"thedrummer/rocinante-12b",
	"thedrummer/unslopnemo-12b",
	"undi95/remm-slerp-l2-13b",
	"undi95/toppy-m-7b",
	"undi95/toppy-m-7b:free",
	"x-ai/grok-2-1212",
	"x-ai/grok-2-vision-1212",
	"x-ai/grok-beta",
	"x-ai/grok-vision-beta",
	"xwin-lm/xwin-lm-70b",
}

relay/adaptor/openrouter/model.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package openrouter

// RequestProvider customizes how your requests are routed using the provider object
// in the request body for Chat Completions and Completions.
//
// https://openrouter.ai/docs/features/provider-routing
type RequestProvider struct {
	// Order is the list of provider names to try in order (e.g. ["Anthropic", "OpenAI"]). Default: empty
	Order []string `json:"order,omitempty"`
	// AllowFallbacks is whether to allow backup providers when the primary is unavailable. Default: true
	AllowFallbacks bool `json:"allow_fallbacks,omitempty"`
	// RequireParameters restricts requests to providers that support all parameters in your request. Default: false
	RequireParameters bool `json:"require_parameters,omitempty"`
	// DataCollection controls whether to use providers that may store data ("allow" or "deny"). Default: "allow"
	DataCollection string `json:"data_collection,omitempty" binding:"omitempty,oneof=allow deny"`
	// Ignore is the list of provider names to skip for this request. Default: empty
	Ignore []string `json:"ignore,omitempty"`
	// Quantizations is the list of quantization levels to filter by (e.g. ["int4", "int8"]). Default: empty
	Quantizations []string `json:"quantizations,omitempty"`
	// Sort sorts providers by price or throughput (e.g. "price" or "throughput"). Default: empty
	Sort string `json:"sort,omitempty" binding:"omitempty,oneof=price throughput latency"`
}
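To show how the provider object rides along in a request body, a hedged sketch (the struct fields are copied from the new file above; the surrounding request shape is simplified and the model name is just an example):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of RequestProvider from the new file above.
type RequestProvider struct {
	Order             []string `json:"order,omitempty"`
	AllowFallbacks    bool     `json:"allow_fallbacks,omitempty"`
	RequireParameters bool     `json:"require_parameters,omitempty"`
	DataCollection    string   `json:"data_collection,omitempty"`
	Ignore            []string `json:"ignore,omitempty"`
	Quantizations     []string `json:"quantizations,omitempty"`
	Sort              string   `json:"sort,omitempty"`
}

func main() {
	// A chat request that tries Anthropic before OpenAI and opts out of
	// providers that may store data.
	body := map[string]any{
		"model":    "anthropic/claude-3.5-sonnet",
		"messages": []map[string]string{{"role": "user", "content": "hi"}},
		"provider": RequestProvider{
			Order:          []string{"Anthropic", "OpenAI"},
			AllowFallbacks: true,
			DataCollection: "deny",
		},
	}
	out, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(out))
}
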
@@ -36,7 +36,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	return ConvertRequest(*request), nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

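The signature change in the hunk above recurs across several adaptor hunks below; a hedged sketch of the new shape (the interface shown is a simplification for illustration, not the repo's actual adaptor.Adaptor definition):

package adaptor

import (
	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/relay/model"
)

// ImageRequestConverter is an illustrative slice of the adaptor contract.
type ImageRequestConverter interface {
	// Before: ConvertImageRequest(request *model.ImageRequest) (any, error)
	// After: the request's *gin.Context is threaded through so adaptors can
	// read per-request state when converting image requests.
	ConvertImageRequest(c *gin.Context, request *model.ImageRequest) (any, error)
}
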
@@ -25,11 +25,17 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
		Prompt: Prompt{
			Messages: make([]ChatMessage, 0, len(textRequest.Messages)),
		},
		Temperature:    textRequest.Temperature,
		CandidateCount: textRequest.N,
		TopP:           textRequest.TopP,
		TopK:           textRequest.MaxTokens,
		Temperature: textRequest.Temperature,
		TopP:        textRequest.TopP,
		TopK:        textRequest.MaxTokens,
	}

	if textRequest.N != nil {
		palmRequest.CandidateCount = *textRequest.N
	} else {
		palmRequest.CandidateCount = 1
	}

	for _, message := range textRequest.Messages {
		palmMessage := ChatMessage{
			Content: message.StringContent(),

@@ -80,7 +80,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
	return nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	return nil, errors.Errorf("not implement")
}

@@ -23,7 +23,7 @@ type Adaptor struct {
}

// ConvertImageRequest implements adaptor.Adaptor.
func (*Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (*Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	return DrawImageRequest{
		Input: ImageInput{
			Steps:           25,

@@ -33,9 +33,16 @@ var ModelList = []string{
	// -------------------------------------
	// language model
	// -------------------------------------
	"anthropic/claude-3.5-haiku",
	"anthropic/claude-3.5-sonnet",
	"anthropic/claude-3.7-sonnet",
	"deepseek-ai/deepseek-r1",
	"ibm-granite/granite-20b-code-instruct-8k",
	"ibm-granite/granite-3.0-2b-instruct",
	"ibm-granite/granite-3.0-8b-instruct",
	"ibm-granite/granite-3.1-2b-instruct",
	"ibm-granite/granite-3.1-8b-instruct",
	"ibm-granite/granite-3.2-8b-instruct",
	"ibm-granite/granite-8b-code-instruct-128k",
	"meta/llama-2-13b",
	"meta/llama-2-13b-chat",
@@ -50,7 +57,6 @@ var ModelList = []string{
	"meta/meta-llama-3-8b-instruct",
	"mistralai/mistral-7b-instruct-v0.2",
	"mistralai/mistral-7b-v0.1",
	"mistralai/mixtral-8x7b-instruct-v0.1",
	// -------------------------------------
	// video model
	// -------------------------------------

@@ -69,7 +69,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	return convertedRequest, nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

@@ -105,7 +105,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
	return nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

@@ -19,6 +19,7 @@ var ModelList = []string{
	"claude-3-5-sonnet@20240620",
	"claude-3-5-sonnet-v2@20241022",
	"claude-3-5-haiku@20241022",
	"claude-3-7-sonnet@20250219",
}

const anthropicVersion = "vertex-2023-10-16"
@@ -31,7 +32,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
		return nil, errors.New("request is nil")
	}

	claudeReq := anthropic.ConvertRequest(*request)
	claudeReq, err := anthropic.ConvertRequest(c, *request)
	if err != nil {
		return nil, errors.Wrap(err, "convert request")
	}

	req := Request{
		AnthropicVersion: anthropicVersion,
		// Model:            claudeReq.Model,

@@ -16,10 +16,12 @@ import (

var ModelList = []string{
	"gemini-pro", "gemini-pro-vision",
	"gemini-1.5-pro-001", "gemini-1.5-flash-001",
	"gemini-1.5-pro-002", "gemini-1.5-flash-002",
	"gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
	"gemini-exp-1206",
	"gemini-1.5-pro-001", "gemini-1.5-pro-002",
	"gemini-1.5-flash-001", "gemini-1.5-flash-002",
	"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
	"gemini-2.0-flash-lite-preview-02-05",
	"gemini-2.0-flash-thinking-exp-01-21",
}

type Adaptor struct {

@@ -39,7 +39,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	return nil, nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

@@ -41,10 +41,15 @@ func requestOpenAI2Xunfei(request model.GeneralOpenAIRequest, xunfeiAppId string
	xunfeiRequest.Header.AppId = xunfeiAppId
	xunfeiRequest.Parameter.Chat.Domain = domain
	xunfeiRequest.Parameter.Chat.Temperature = request.Temperature
	xunfeiRequest.Parameter.Chat.TopK = request.N
	xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens
	xunfeiRequest.Payload.Message.Text = messages

	if request.N != nil {
		xunfeiRequest.Parameter.Chat.TopK = *request.N
	} else {
		xunfeiRequest.Parameter.Chat.TopK = 1
	}

	if strings.HasPrefix(domain, "generalv3") || domain == "4.0Ultra" {
		functions := make([]model.Function, len(request.Tools))
		for i, tool := range request.Tools {

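Both the PaLM and Xunfei hunks above apply the same nil-default rule now that N is a pointer; a generic helper expressing the pattern (illustrative only, not code from the repo):

package main

import "fmt"

func valueOrDefault[T any](p *T, def T) T {
	if p != nil {
		return *p
	}
	return def
}

func main() {
	var n *int
	fmt.Println(valueOrDefault(n, 1)) // 1: an unset N falls back to the default
	four := 4
	fmt.Println(valueOrDefault(&four, 1)) // 4: an explicit N is respected
}
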
@@ -80,7 +80,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	}
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
func (a *Adaptor) ConvertImageRequest(_ *gin.Context, request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

(File diff suppressed because it is too large)

@@ -50,5 +50,8 @@ const (
	Replicate
	BaiduV2
	XunfeiV2
	AliBailian
	OpenAICompatible
	GeminiOpenAICompatible
	Dummy
)

@@ -50,6 +50,10 @@ var ChannelBaseURLs = []string{
	"https://api.replicate.com/v1/models/",      // 46
	"https://qianfan.baidubce.com",              // 47
	"https://spark-api-open.xf-yun.com",         // 48
	"https://dashscope.aliyuncs.com",            // 49
	"",                                          // 50
	"https://generativelanguage.googleapis.com/v1beta/openai/", // 51
}

func init() {

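The channel-type const block and ChannelBaseURLs are kept index-aligned; a hedged sketch of how the new entries resolve, assuming the iota values match the // 46 through // 51 comments above:

package main

import "fmt"

func main() {
	// Assumed mapping derived from the two hunks above (not repo code):
	baseURLs := map[string]string{
		"AliBailian":             "https://dashscope.aliyuncs.com", // 49
		"OpenAICompatible":       "",                               // 50: user-supplied base URL
		"GeminiOpenAICompatible": "https://generativelanguage.googleapis.com/v1beta/openai/", // 51
	}
	fmt.Println(baseURLs["GeminiOpenAICompatible"])
}
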
@@ -8,18 +8,16 @@ import (
	"net/http"
	"strings"

	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/relay/constant/role"

	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/model"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/channeltype"
	"github.com/songquanpeng/one-api/relay/constant/role"
	"github.com/songquanpeng/one-api/relay/controller/validator"
	"github.com/songquanpeng/one-api/relay/meta"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
@@ -45,10 +43,10 @@ func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.Gener
	return textRequest, nil
}

func getPromptTokens(textRequest *relaymodel.GeneralOpenAIRequest, relayMode int) int {
func getPromptTokens(ctx context.Context, textRequest *relaymodel.GeneralOpenAIRequest, relayMode int) int {
	switch relayMode {
	case relaymode.ChatCompletions:
		return openai.CountTokenMessages(textRequest.Messages, textRequest.Model)
		return openai.CountTokenMessages(ctx, textRequest.Messages, textRequest.Model)
	case relaymode.Completions:
		return openai.CountTokenInput(textRequest.Prompt, textRequest.Model)
	case relaymode.Moderations:
@@ -94,19 +92,30 @@ func preConsumeQuota(ctx context.Context, textRequest *relaymodel.GeneralOpenAIR
	return preConsumedQuota, nil
}

func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest, ratio float64, preConsumedQuota int64, modelRatio float64, groupRatio float64, systemPromptReset bool) {
func postConsumeQuota(ctx context.Context,
	usage *relaymodel.Usage,
	meta *meta.Meta,
	textRequest *relaymodel.GeneralOpenAIRequest,
	ratio float64,
	preConsumedQuota int64,
	modelRatio float64,
	groupRatio float64,
	systemPromptReset bool) (quota int64) {
	if usage == nil {
		logger.Error(ctx, "usage is nil, which is unexpected")
		return
	}
	var quota int64
	completionRatio := billingratio.GetCompletionRatio(textRequest.Model, meta.ChannelType)
	promptTokens := usage.PromptTokens
	// It appears that DeepSeek's official service automatically merges ReasoningTokens into CompletionTokens,
	// but the behavior of third-party providers may differ, so for now we do not add them manually.
	// completionTokens := usage.CompletionTokens + usage.CompletionTokensDetails.ReasoningTokens
	completionTokens := usage.CompletionTokens
	quota = int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
	quota = int64(math.Ceil((float64(promptTokens)+float64(completionTokens)*completionRatio)*ratio)) + usage.ToolsCost
	if ratio != 0 && quota <= 0 {
		quota = 1
	}

	totalTokens := promptTokens + completionTokens
	if totalTokens == 0 {
		// in this case, must be some error happened
@@ -122,7 +131,13 @@ func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.M
	if err != nil {
		logger.Error(ctx, "error update user quota cache: "+err.Error())
	}
	logContent := fmt.Sprintf("倍率:%.2f × %.2f × %.2f", modelRatio, groupRatio, completionRatio)

	var logContent string
	if usage.ToolsCost == 0 {
		logContent = fmt.Sprintf("倍率:%.2f × %.2f × %.2f", modelRatio, groupRatio, completionRatio)
	} else {
		logContent = fmt.Sprintf("倍率:%.2f × %.2f × %.2f, tools cost %d", modelRatio, groupRatio, completionRatio, usage.ToolsCost)
	}
	model.RecordConsumeLog(ctx, &model.Log{
		UserId:            meta.UserId,
		ChannelId:         meta.ChannelId,
@@ -138,6 +153,8 @@ func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.M
	})
	model.UpdateUserUsedQuotaAndRequestCount(meta.UserId, quota)
	model.UpdateChannelUsedQuota(meta.ChannelId, quota)

	return quota
}

func getMappedModelName(modelName string, mapping map[string]string) (string, bool) {

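To make the billing change concrete, a worked example of the quota formula in postConsumeQuota above (the ratio values are made-up numbers, not the project's defaults):

package main

import (
	"fmt"
	"math"
)

func main() {
	// quota = ceil((promptTokens + completionTokens*completionRatio) * ratio) + toolsCost
	promptTokens, completionTokens := 1000, 500
	completionRatio := 3.0   // output hypothetically billed at 3x input
	ratio := 0.5             // modelRatio * groupRatio
	var toolsCost int64 = 20 // usage.ToolsCost, e.g. accrued by tool use

	quota := int64(math.Ceil((float64(promptTokens)+float64(completionTokens)*completionRatio)*ratio)) + toolsCost
	fmt.Println(quota) // ceil((1000 + 500*3) * 0.5) + 20 = 1270
}
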
@@ -157,7 +157,7 @@ func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatus
		channeltype.Ali,
		channeltype.Replicate,
		channeltype.Baidu:
		finalRequest, err := adaptor.ConvertImageRequest(imageRequest)
		finalRequest, err := adaptor.ConvertImageRequest(c, imageRequest)
		if err != nil {
			return openai.ErrorWrapper(err, "convert_image_request_failed", http.StatusInternalServerError)
		}

@@ -10,6 +10,7 @@ import (
	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay"
	"github.com/songquanpeng/one-api/relay/adaptor"
@@ -44,7 +45,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
	groupRatio := billingratio.GetGroupRatio(meta.Group)
	ratio := modelRatio * groupRatio
	// pre-consume quota
	promptTokens := getPromptTokens(textRequest, meta.Mode)
	promptTokens := getPromptTokens(c.Request.Context(), textRequest, meta.Mode)
	meta.PromptTokens = promptTokens
	preConsumedQuota, bizErr := preConsumeQuota(ctx, textRequest, promptTokens, ratio, meta)
	if bizErr != nil {
@@ -104,6 +105,8 @@ func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *model.GeneralO
		logger.Debugf(c.Request.Context(), "converted request failed: %s\n", err.Error())
		return nil, err
	}
	c.Set(ctxkey.ConvertedRequest, convertedRequest)

	jsonData, err := json.Marshal(convertedRequest)
	if err != nil {
		logger.Debugf(c.Request.Context(), "converted request json_marshal_failed: %s\n", err.Error())

@@ -1,5 +1,7 @@
package model

import "github.com/songquanpeng/one-api/relay/adaptor/openrouter"

type ResponseFormat struct {
	Type       string      `json:"type,omitempty"`
	JsonSchema *JSONSchema `json:"json_schema,omitempty"`
@@ -23,49 +25,103 @@ type StreamOptions struct {

type GeneralOpenAIRequest struct {
	// https://platform.openai.com/docs/api-reference/chat/create
	Messages            []Message       `json:"messages,omitempty"`
	Model               string          `json:"model,omitempty"`
	Store               *bool           `json:"store,omitempty"`
	ReasoningEffort     *string         `json:"reasoning_effort,omitempty"`
	Metadata            any             `json:"metadata,omitempty"`
	FrequencyPenalty    *float64        `json:"frequency_penalty,omitempty"`
	LogitBias           any             `json:"logit_bias,omitempty"`
	Logprobs            *bool           `json:"logprobs,omitempty"`
	TopLogprobs         *int            `json:"top_logprobs,omitempty"`
	MaxTokens           int             `json:"max_tokens,omitempty"`
	MaxCompletionTokens *int            `json:"max_completion_tokens,omitempty"`
	N                   int             `json:"n,omitempty"`
	Modalities          []string        `json:"modalities,omitempty"`
	Prediction          any             `json:"prediction,omitempty"`
	Audio               *Audio          `json:"audio,omitempty"`
	PresencePenalty     *float64        `json:"presence_penalty,omitempty"`
	ResponseFormat      *ResponseFormat `json:"response_format,omitempty"`
	Seed                float64         `json:"seed,omitempty"`
	ServiceTier         *string         `json:"service_tier,omitempty"`
	Stop                any             `json:"stop,omitempty"`
	Stream              bool            `json:"stream,omitempty"`
	StreamOptions       *StreamOptions  `json:"stream_options,omitempty"`
	Temperature         *float64        `json:"temperature,omitempty"`
	TopP                *float64        `json:"top_p,omitempty"`
	TopK                int             `json:"top_k,omitempty"`
	Tools               []Tool          `json:"tools,omitempty"`
	ToolChoice          any             `json:"tool_choice,omitempty"`
	ParallelTooCalls    *bool           `json:"parallel_tool_calls,omitempty"`
	User                string          `json:"user,omitempty"`
	FunctionCall        any             `json:"function_call,omitempty"`
	Functions           any             `json:"functions,omitempty"`
	Messages []Message `json:"messages,omitempty"`
	Model    string    `json:"model,omitempty"`
	Store    *bool     `json:"store,omitempty"`
	Metadata any       `json:"metadata,omitempty"`
	// FrequencyPenalty is a number between -2.0 and 2.0 that penalizes
	// new tokens based on their existing frequency in the text so far,
	// default is 0.
	FrequencyPenalty    *float64 `json:"frequency_penalty,omitempty" binding:"omitempty,min=-2,max=2"`
	LogitBias           any      `json:"logit_bias,omitempty"`
	Logprobs            *bool    `json:"logprobs,omitempty"`
	TopLogprobs         *int     `json:"top_logprobs,omitempty"`
	MaxTokens           int      `json:"max_tokens,omitempty"`
	MaxCompletionTokens *int     `json:"max_completion_tokens,omitempty"`
	// N is how many chat completion choices to generate for each input message,
	// defaults to 1.
	N *int `json:"n,omitempty" binding:"omitempty,min=0"`
	// ReasoningEffort constrains effort on reasoning; reasoning models only.
	ReasoningEffort *string `json:"reasoning_effort,omitempty" binding:"omitempty,oneof=low medium high"`
	// Modalities: currently the model only programmatically allows modalities = ["text", "audio"]
	Modalities []string `json:"modalities,omitempty"`
	Prediction any      `json:"prediction,omitempty"`
	Audio      *Audio   `json:"audio,omitempty"`
	// PresencePenalty is a number between -2.0 and 2.0 that penalizes
	// new tokens based on whether they appear in the text so far, default is 0.
	PresencePenalty  *float64        `json:"presence_penalty,omitempty" binding:"omitempty,min=-2,max=2"`
	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
	Seed             float64         `json:"seed,omitempty"`
	ServiceTier      *string         `json:"service_tier,omitempty" binding:"omitempty,oneof=default auto"`
	Stop             any             `json:"stop,omitempty"`
	Stream           bool            `json:"stream,omitempty"`
	StreamOptions    *StreamOptions  `json:"stream_options,omitempty"`
	Temperature      *float64        `json:"temperature,omitempty"`
	TopP             *float64        `json:"top_p,omitempty"`
	TopK             int             `json:"top_k,omitempty"`
	Tools            []Tool          `json:"tools,omitempty"`
	ToolChoice       any             `json:"tool_choice,omitempty"`
	ParallelTooCalls *bool           `json:"parallel_tool_calls,omitempty"`
	User             string          `json:"user,omitempty"`
	FunctionCall     any             `json:"function_call,omitempty"`
	Functions        any             `json:"functions,omitempty"`
	// https://platform.openai.com/docs/api-reference/embeddings/create
	Input          any    `json:"input,omitempty"`
	EncodingFormat string `json:"encoding_format,omitempty"`
	Dimensions     int    `json:"dimensions,omitempty"`
	// https://platform.openai.com/docs/api-reference/images/create
	Prompt  any     `json:"prompt,omitempty"`
	Quality *string `json:"quality,omitempty"`
	Size    string  `json:"size,omitempty"`
	Style   *string `json:"style,omitempty"`
	Prompt           string            `json:"prompt,omitempty"`
	Quality          *string           `json:"quality,omitempty"`
	Size             string            `json:"size,omitempty"`
	Style            *string           `json:"style,omitempty"`
	WebSearchOptions *WebSearchOptions `json:"web_search_options,omitempty"`

	// Others
	Instruction string `json:"instruction,omitempty"`
	NumCtx      int    `json:"num_ctx,omitempty"`
	// -------------------------------------
	// Openrouter
	// -------------------------------------
	Provider         *openrouter.RequestProvider `json:"provider,omitempty"`
	IncludeReasoning *bool                       `json:"include_reasoning,omitempty"`
	// -------------------------------------
	// Anthropic
	// -------------------------------------
	Thinking *Thinking `json:"thinking,omitempty"`
}

// WebSearchOptions configures the web search tool, which searches the web for relevant results to use in a response.
type WebSearchOptions struct {
	// SearchContextSize is the high-level guidance for the amount of context window space to use for the search,
	// default is "medium".
	SearchContextSize *string       `json:"search_context_size,omitempty" binding:"omitempty,oneof=low medium high"`
	UserLocation      *UserLocation `json:"user_location,omitempty"`
}

// UserLocation is a struct that contains the location of the user.
type UserLocation struct {
	// Approximate is the approximate location parameters for the search.
	Approximate UserLocationApproximate `json:"approximate" binding:"required"`
	// Type is the type of location approximation.
	Type string `json:"type" binding:"required,oneof=approximate"`
}

// UserLocationApproximate is a struct that contains the approximate location of the user.
type UserLocationApproximate struct {
	// City is the city of the user, e.g. San Francisco.
	City *string `json:"city,omitempty"`
	// Country is the country of the user, e.g. US.
	Country *string `json:"country,omitempty"`
	// Region is the region of the user, e.g. California.
	Region *string `json:"region,omitempty"`
	// Timezone is the IANA timezone of the user, e.g. America/Los_Angeles.
	Timezone *string `json:"timezone,omitempty"`
}

// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking
type Thinking struct {
	Type         string `json:"type"`
	BudgetTokens int    `json:"budget_tokens" binding:"omitempty,min=1024"`
}

func (r GeneralOpenAIRequest) ParseInput() []string {

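What the new web_search_options field looks like on the wire, as a hedged sketch (trimmed copies of the structs above; the location values are invented):

package main

import (
	"encoding/json"
	"fmt"
)

type UserLocationApproximate struct {
	City     *string `json:"city,omitempty"`
	Timezone *string `json:"timezone,omitempty"`
}

type UserLocation struct {
	Approximate UserLocationApproximate `json:"approximate"`
	Type        string                  `json:"type"`
}

type WebSearchOptions struct {
	SearchContextSize *string       `json:"search_context_size,omitempty"`
	UserLocation      *UserLocation `json:"user_location,omitempty"`
}

func main() {
	size := "low"
	city := "San Francisco"
	opts := WebSearchOptions{
		SearchContextSize: &size,
		UserLocation: &UserLocation{
			Type:        "approximate",
			Approximate: UserLocationApproximate{City: &city},
		},
	}
	out, _ := json.Marshal(opts)
	fmt.Println(string(out)) // the JSON that lands in "web_search_options"
}
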
@@ -1,12 +1,106 @@
package model

import (
	"context"
	"strings"

	"github.com/songquanpeng/one-api/common/logger"
)

// ReasoningFormat is the format of reasoning content,
// can be set by the reasoning_format parameter in the request url.
type ReasoningFormat string

const (
	ReasoningFormatUnspecified ReasoningFormat = ""
	// ReasoningFormatReasoningContent is the reasoning format used by deepseek official API
	ReasoningFormatReasoningContent ReasoningFormat = "reasoning_content"
	// ReasoningFormatReasoning is the reasoning format used by openrouter
	ReasoningFormatReasoning ReasoningFormat = "reasoning"

	// ReasoningFormatThinkTag is the reasoning format used by 3rd party deepseek-r1 providers.
	//
	// Deprecated: I believe <think> is a very poor format, especially in stream mode, it is difficult to extract and convert.
	// Considering that only a few deepseek-r1 third-party providers use this format, it has been decided to no longer support it.
	// ReasoningFormatThinkTag ReasoningFormat = "think-tag"

	// ReasoningFormatThinking is the reasoning format used by anthropic
	ReasoningFormatThinking ReasoningFormat = "thinking"
)

type Message struct {
	Role             string  `json:"role,omitempty"`
	Content          any     `json:"content,omitempty"`
	ReasoningContent any     `json:"reasoning_content,omitempty"`
	Name             *string `json:"name,omitempty"`
	ToolCalls        []Tool  `json:"tool_calls,omitempty"`
	ToolCallId       string  `json:"tool_call_id,omitempty"`
	Role string `json:"role,omitempty"`
	// Content is a string or a list of objects
	Content    any              `json:"content,omitempty"`
	Name       *string          `json:"name,omitempty"`
	ToolCalls  []Tool           `json:"tool_calls,omitempty"`
	ToolCallId string           `json:"tool_call_id,omitempty"`
	Audio      *messageAudio    `json:"audio,omitempty"`
	Annotation []AnnotationItem `json:"annotation,omitempty"`

	// -------------------------------------
	// Deepseek-specific fields
	// https://api-docs.deepseek.com/api/create-chat-completion
	// -------------------------------------
	// Prefix forces the model to begin its answer with the supplied prefix in the assistant message.
	// To enable this feature, set base_url to "https://api.deepseek.com/beta".
	Prefix *bool `json:"prefix,omitempty"`
	// ReasoningContent is used for the deepseek-reasoner model in the Chat
	// Prefix Completion feature as the input for the CoT in the last assistant message.
	// When using this feature, the prefix parameter must be set to true.
	ReasoningContent *string `json:"reasoning_content,omitempty"`

	// -------------------------------------
	// Openrouter
	// -------------------------------------
	Reasoning *string `json:"reasoning,omitempty"`
	Refusal   *bool   `json:"refusal,omitempty"`

	// -------------------------------------
	// Anthropic
	// -------------------------------------
	Thinking  *string `json:"thinking,omitempty"`
	Signature *string `json:"signature,omitempty"`
}

type AnnotationItem struct {
	Type        string      `json:"type" binding:"oneof=url_citation"`
	UrlCitation UrlCitation `json:"url_citation"`
}

// UrlCitation is a URL citation when using web search.
type UrlCitation struct {
	// EndIndex is the index of the last character of the URL citation in the message.
	EndIndex int `json:"end_index"`
	// StartIndex is the index of the first character of the URL citation in the message.
	StartIndex int `json:"start_index"`
	// Title is the title of the web resource.
	Title string `json:"title"`
	// Url is the URL of the web resource.
	Url string `json:"url"`
}

// SetReasoningContent sets the reasoning content based on the format
func (m *Message) SetReasoningContent(format string, reasoningContent string) {
	switch ReasoningFormat(strings.ToLower(strings.TrimSpace(format))) {
	case ReasoningFormatReasoningContent:
		m.ReasoningContent = &reasoningContent
		// case ReasoningFormatThinkTag:
		// 	m.Content = fmt.Sprintf("<think>%s</think>%s", reasoningContent, m.Content)
	case ReasoningFormatThinking:
		m.Thinking = &reasoningContent
	case ReasoningFormatReasoning,
		ReasoningFormatUnspecified:
		m.Reasoning = &reasoningContent
	default:
		logger.Warnf(context.TODO(), "unknown reasoning format: %q", format)
	}
}

type messageAudio struct {
	Id         string `json:"id"`
	Data       string `json:"data,omitempty"`
	ExpiredAt  int    `json:"expired_at,omitempty"`
	Transcript string `json:"transcript,omitempty"`
}

func (m Message) IsStringContent() bool {
@@ -27,6 +121,7 @@ func (m Message) StringContent() string {
			if !ok {
				continue
			}

			if contentMap["type"] == ContentTypeText {
				if subStr, ok := contentMap["text"].(string); ok {
					contentStr += subStr
@@ -35,6 +130,7 @@ func (m Message) StringContent() string {
		}
		return contentStr
	}

	return ""
}

@@ -44,10 +140,11 @@ func (m Message) ParseContent() []MessageContent {
	if ok {
		contentList = append(contentList, MessageContent{
			Type: ContentTypeText,
			Text: content,
			Text: &content,
		})
		return contentList
	}

	anyList, ok := m.Content.([]any)
	if ok {
		for _, contentItem := range anyList {
@@ -60,7 +157,7 @@ func (m Message) ParseContent() []MessageContent {
				if subStr, ok := contentMap["text"].(string); ok {
					contentList = append(contentList, MessageContent{
						Type: ContentTypeText,
						Text: subStr,
						Text: &subStr,
					})
				}
			case ContentTypeImageURL:
@@ -72,8 +169,21 @@ func (m Message) ParseContent() []MessageContent {
						},
					})
				}
			case ContentTypeInputAudio:
				if subObj, ok := contentMap["input_audio"].(map[string]any); ok {
					contentList = append(contentList, MessageContent{
						Type: ContentTypeInputAudio,
						InputAudio: &InputAudio{
							Data:   subObj["data"].(string),
							Format: subObj["format"].(string),
						},
					})
				}
			default:
				logger.Warnf(context.TODO(), "unknown content type: %s", contentMap["type"])
			}
		}

		return contentList
	}
	return nil
@@ -85,7 +195,23 @@ type ImageURL struct {
}

type MessageContent struct {
	Type     string    `json:"type,omitempty"`
	Text     string    `json:"text"`
	ImageURL *ImageURL `json:"image_url,omitempty"`
	// Type should be one of the following: text/input_audio
	Type       string      `json:"type,omitempty"`
	Text       *string     `json:"text,omitempty"`
	ImageURL   *ImageURL   `json:"image_url,omitempty"`
	InputAudio *InputAudio `json:"input_audio,omitempty"`
	// -------------------------------------
	// Anthropic
	// -------------------------------------
	Thinking  *string `json:"thinking,omitempty"`
	Signature *string `json:"signature,omitempty"`
}

type InputAudio struct {
	// Data is the base64 encoded audio data
	Data string `json:"data" binding:"required"`
	// Format is the audio format, should be one of the
	// following: mp3/mp4/mpeg/mpga/m4a/wav/webm/pcm16.
	// When stream=true, format should be pcm16
	Format string `json:"format"`
}

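A sketch of SetReasoningContent in use (the Message here is a trimmed stand-in for the struct above, and the switch drops the normalization and unknown-format warning for brevity; the reasoning text is invented):

package main

import "fmt"

type Message struct {
	ReasoningContent *string
	Reasoning        *string
	Thinking         *string
}

func (m *Message) SetReasoningContent(format, reasoningContent string) {
	switch format {
	case "reasoning_content": // deepseek-style field
		m.ReasoningContent = &reasoningContent
	case "thinking": // anthropic-style field
		m.Thinking = &reasoningContent
	default: // "reasoning" (openrouter) or unspecified
		m.Reasoning = &reasoningContent
	}
}

func main() {
	var m Message
	// e.g. a client calling /v1/chat/completions?reasoning_format=reasoning_content
	m.SetReasoningContent("reasoning_content", "step-by-step thoughts")
	fmt.Println(*m.ReasoningContent)
}
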
@@ -1,17 +1,22 @@
package model

// Usage is the token usage information returned by OpenAI API.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
	// PromptTokensDetails may be empty for some models
	PromptTokensDetails *usagePromptTokensDetails `json:"prompt_tokens_details,omitempty"`
	// CompletionTokensDetails may be empty for some models
	CompletionTokensDetails *usageCompletionTokensDetails `json:"completion_tokens_details,omitempty"`
	ServiceTier             string                        `json:"service_tier,omitempty"`
	SystemFingerprint       string                        `json:"system_fingerprint,omitempty"`

	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"`
}

type CompletionTokensDetails struct {
	ReasoningTokens          int `json:"reasoning_tokens"`
	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
	RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
	// -------------------------------------
	// Custom fields
	// -------------------------------------
	// ToolsCost is the cost of using tools, in quota.
	ToolsCost int64 `json:"tools_cost,omitempty"`
}

type Error struct {
@@ -25,3 +30,20 @@ type ErrorWithStatusCode struct {
	Error
	StatusCode int `json:"status_code"`
}

type usagePromptTokensDetails struct {
	CachedTokens int `json:"cached_tokens"`
	AudioTokens  int `json:"audio_tokens"`
	// TextTokens could be zero for pure text chats
	TextTokens  int `json:"text_tokens"`
	ImageTokens int `json:"image_tokens"`
}

type usageCompletionTokensDetails struct {
	ReasoningTokens          int `json:"reasoning_tokens"`
	AudioTokens              int `json:"audio_tokens"`
	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
	RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
	// TextTokens could be zero for pure text chats
	TextTokens int `json:"text_tokens"`
}

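A hedged sketch of parsing the reshaped usage payload, assuming ToolsCost lives on Usage itself (which is what postConsumeQuota's usage.ToolsCost access suggests; the detail structs are trimmed and the numbers are invented):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirror of the Usage shape above (illustration only; the real
// detail structs are unexported in the repo).
type Usage struct {
	PromptTokens     int   `json:"prompt_tokens"`
	CompletionTokens int   `json:"completion_tokens"`
	TotalTokens      int   `json:"total_tokens"`
	ToolsCost        int64 `json:"tools_cost,omitempty"`
}

func main() {
	payload := []byte(`{"prompt_tokens":1000,"completion_tokens":500,"total_tokens":1500,"tools_cost":20}`)
	var u Usage
	if err := json.Unmarshal(payload, &u); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", u)
}
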
@@ -13,4 +13,5 @@ const (
	AudioTranslation
	// Proxy is a special relay mode for proxying requests to custom upstream
	Proxy
	ImagesEdits
)

@@ -245,7 +245,7 @@ const LogsTable = () => {
    if (isAdminUser) {
      url = `/api/log/?p=${startIdx}&page_size=${pageSize}&type=${logType}&username=${username}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}&channel=${channel}`;
    } else {
      url = `/api/log/self/?p=${startIdx}&page_size=${pageSize}&type=${logType}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}`;
      url = `/api/log/self?p=${startIdx}&page_size=${pageSize}&type=${logType}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}`;
    }
    const res = await API.get(url);
    const { success, message, data } = res.data;

@@ -44,6 +44,9 @@ function renderType(type, t) {
function renderBalance(type, balance, t) {
  switch (type) {
    case 1: // OpenAI
        if (balance === 0) {
            return <span>{t('channel.table.balance_not_supported')}</span>;
        }
      return <span>${balance.toFixed(2)}</span>;
    case 4: // CloseAI
      return <span>¥{balance.toFixed(2)}</span>;
@@ -108,7 +111,7 @@ const ChannelsTable = () => {

  const loadChannels = async (startIdx) => {
    const res = await API.get(`/api/channel/?p=${startIdx}`);
    const {success, message, data} = res.data;
    const { success, message, data } = res.data;
    if (success) {
      let localChannels = data.map(processChannelData);
      if (startIdx === 0) {
@@ -490,7 +493,6 @@ const ChannelsTable = () => {
              onClick={() => {
                sortChannel('balance');
              }}
              hidden={!showDetail}
            >
              {t('channel.table.balance')}
            </Table.HeaderCell>
@@ -499,6 +501,7 @@ const ChannelsTable = () => {
              onClick={() => {
                sortChannel('priority');
              }}
              hidden={!showDetail}
            >
              {t('channel.table.priority')}
            </Table.HeaderCell>
@@ -538,7 +541,7 @@ const ChannelsTable = () => {
                      basic
                    />
                  </Table.Cell>
                  <Table.Cell hidden={!showDetail}>
                  <Table.Cell>
                    <Popup
                      trigger={
                        <span
@@ -554,7 +557,7 @@ const ChannelsTable = () => {
                      basic
                    />
                  </Table.Cell>
                  <Table.Cell>
                  <Table.Cell hidden={!showDetail}>
                    <Popup
                      trigger={
                        <Input
@@ -588,7 +591,15 @@ const ChannelsTable = () => {
                    />
                  </Table.Cell>
                  <Table.Cell>
                    <div>
                    <div
                      style={{
                        display: 'flex',
                        alignItems: 'center',
                        flexWrap: 'wrap',
                        gap: '2px',
                        rowGap: '6px',
                      }}
                    >
                      <Button
                        size={'tiny'}
                        positive

@@ -225,7 +225,7 @@ const LogsTable = () => {
     if (isAdminUser) {
       url = `/api/log/?p=${startIdx}&type=${logType}&username=${username}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}&channel=${channel}`;
     } else {
-      url = `/api/log/self/?p=${startIdx}&type=${logType}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}`;
+      url = `/api/log/self?p=${startIdx}&type=${logType}&token_name=${token_name}&model_name=${model_name}&start_timestamp=${localStartTimestamp}&end_timestamp=${localEndTimestamp}`;
     }
     const res = await API.get(url);
     const { success, message, data } = res.data;
@@ -1,12 +1,26 @@
 export const CHANNEL_OPTIONS = [
-  {key: 1, text: 'OpenAI', value: 1, color: 'green'},
-  {key: 14, text: 'Anthropic Claude', value: 14, color: 'black'},
-  {key: 33, text: 'AWS', value: 33, color: 'black'},
-  {key: 3, text: 'Azure OpenAI', value: 3, color: 'olive'},
-  {key: 11, text: 'Google PaLM2', value: 11, color: 'orange'},
-  {key: 24, text: 'Google Gemini', value: 24, color: 'orange'},
-  {key: 28, text: 'Mistral AI', value: 28, color: 'orange'},
-  {key: 41, text: 'Novita', value: 41, color: 'purple'},
+  { key: 1, text: 'OpenAI', value: 1, color: 'green' },
+  {
+    key: 50,
+    text: 'OpenAI 兼容',
+    value: 50,
+    color: 'olive',
+    description: 'OpenAI 兼容渠道,支持设置 Base URL',
+  },
+  {key: 14, text: 'Anthropic', value: 14, color: 'black'},
+  { key: 33, text: 'AWS', value: 33, color: 'black' },
+  {key: 3, text: 'Azure', value: 3, color: 'olive'},
+  {key: 11, text: 'PaLM2', value: 11, color: 'orange'},
+  {key: 24, text: 'Gemini', value: 24, color: 'orange'},
+  {
+    key: 51,
+    text: 'Gemini (OpenAI)',
+    value: 51,
+    color: 'orange',
+    description: 'Gemini OpenAI 兼容格式',
+  },
+  { key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
+  { key: 41, text: 'Novita', value: 41, color: 'purple' },
   {
     key: 40,
     text: '字节火山引擎',
@@ -28,7 +42,14 @@ export const CHANNEL_OPTIONS = [
     color: 'blue',
     tip: '请前往<a href="https://console.bce.baidu.com/iam/#/iam/apikey/list" target="_blank">此处</a>获取 API Key,注意本渠道仅支持<a target="_blank" href="https://cloud.baidu.com/doc/WENXINWORKSHOP/s/em4tsqo3v">推理服务 V2</a>相关模型',
   },
-  {key: 17, text: '阿里通义千问', value: 17, color: 'orange'},
+  {
+    key: 17,
+    text: '阿里通义千问',
+    value: 17,
+    color: 'orange',
+    tip: '如需使用阿里云百炼,请使用<strong>阿里云百炼</strong>渠道',
+  },
+  { key: 49, text: '阿里云百炼', value: 49, color: 'orange' },
   {
     key: 18,
     text: '讯飞星火认知',
@@ -43,38 +64,45 @@ export const CHANNEL_OPTIONS = [
     color: 'blue',
     tip: 'HTTP 版本的讯飞接口,前往<a href="https://console.xfyun.cn/services/cbm" target="_blank">此处</a>获取 HTTP 服务接口认证密钥',
   },
-  {key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet'},
-  {key: 19, text: '360 智脑', value: 19, color: 'blue'},
-  {key: 25, text: 'Moonshot AI', value: 25, color: 'black'},
-  {key: 23, text: '腾讯混元', value: 23, color: 'teal'},
-  {key: 26, text: '百川大模型', value: 26, color: 'orange'},
-  {key: 27, text: 'MiniMax', value: 27, color: 'red'},
-  {key: 29, text: 'Groq', value: 29, color: 'orange'},
-  {key: 30, text: 'Ollama', value: 30, color: 'black'},
-  {key: 31, text: '零一万物', value: 31, color: 'green'},
-  {key: 32, text: '阶跃星辰', value: 32, color: 'blue'},
-  {key: 34, text: 'Coze', value: 34, color: 'blue'},
-  {key: 35, text: 'Cohere', value: 35, color: 'blue'},
-  {key: 36, text: 'DeepSeek', value: 36, color: 'black'},
-  {key: 37, text: 'Cloudflare', value: 37, color: 'orange'},
-  {key: 38, text: 'DeepL', value: 38, color: 'black'},
-  {key: 39, text: 'together.ai', value: 39, color: 'blue'},
-  {key: 42, text: 'VertexAI', value: 42, color: 'blue'},
-  {key: 43, text: 'Proxy', value: 43, color: 'blue'},
-  {key: 44, text: 'SiliconFlow', value: 44, color: 'blue'},
-  {key: 45, text: 'xAI', value: 45, color: 'blue'},
-  {key: 46, text: 'Replicate', value: 46, color: 'blue'},
-  {key: 8, text: '自定义渠道', value: 8, color: 'pink'},
-  {key: 22, text: '知识库:FastGPT', value: 22, color: 'blue'},
-  {key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple'},
-    {key: 20, text: 'OpenRouter', value: 20, color: 'black'},
-  {key: 2, text: '代理:API2D', value: 2, color: 'blue'},
-  {key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown'},
-  {key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple'},
-  {key: 10, text: '代理:AI Proxy', value: 10, color: 'purple'},
-  {key: 4, text: '代理:CloseAI', value: 4, color: 'teal'},
-  {key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet'},
-  {key: 9, text: '代理:AI.LS', value: 9, color: 'yellow'},
-  {key: 12, text: '代理:API2GPT', value: 12, color: 'blue'},
-  {key: 13, text: '代理:AIGC2D', value: 13, color: 'purple'},
+  { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' },
+  { key: 19, text: '360 智脑', value: 19, color: 'blue' },
+  { key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
+  { key: 23, text: '腾讯混元', value: 23, color: 'teal' },
+  { key: 26, text: '百川大模型', value: 26, color: 'orange' },
+  { key: 27, text: 'MiniMax', value: 27, color: 'red' },
+  { key: 29, text: 'Groq', value: 29, color: 'orange' },
+  { key: 30, text: 'Ollama', value: 30, color: 'black' },
+  { key: 31, text: '零一万物', value: 31, color: 'green' },
+  { key: 32, text: '阶跃星辰', value: 32, color: 'blue' },
+  { key: 34, text: 'Coze', value: 34, color: 'blue' },
+  { key: 35, text: 'Cohere', value: 35, color: 'blue' },
+  { key: 36, text: 'DeepSeek', value: 36, color: 'black' },
+  { key: 37, text: 'Cloudflare', value: 37, color: 'orange' },
+  { key: 38, text: 'DeepL', value: 38, color: 'black' },
+  { key: 39, text: 'together.ai', value: 39, color: 'blue' },
+  { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
+  { key: 43, text: 'Proxy', value: 43, color: 'blue' },
+  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
+  { key: 45, text: 'xAI', value: 45, color: 'blue' },
+  { key: 46, text: 'Replicate', value: 46, color: 'blue' },
+  {
+    key: 8,
+    text: '自定义渠道',
+    value: 8,
+    color: 'pink',
+    tip: '不推荐使用,请使用 <strong>OpenAI 兼容</strong>渠道类型。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型',
+    description: '不推荐使用,请使用 OpenAI 兼容渠道类型',
+  },
+  { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
+  { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
+  { key: 20, text: 'OpenRouter', value: 20, color: 'black' },
+  { key: 2, text: '代理:API2D', value: 2, color: 'blue' },
+  { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' },
+  { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' },
+  { key: 10, text: '代理:AI Proxy', value: 10, color: 'purple' },
+  { key: 4, text: '代理:CloseAI', value: 4, color: 'teal' },
+  { key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet' },
+  { key: 9, text: '代理:AI.LS', value: 9, color: 'yellow' },
+  { key: 12, text: '代理:API2GPT', value: 12, color: 'blue' },
+  { key: 13, text: '代理:AIGC2D', value: 13, color: 'purple' },
 ];
@@ -1,5 +1,5 @@
-import {Label, Message} from 'semantic-ui-react';
-import {getChannelOption} from './helper';
+import { Label, Message } from 'semantic-ui-react';
+import { getChannelOption } from './helper';
 import React from 'react';
 
 export function renderText(text, limit) {
@@ -16,7 +16,15 @@ export function renderGroup(group) {
   let groups = group.split(',');
   groups.sort();
   return (
-    <>
+    <div
+      style={{
+        display: 'flex',
+        alignItems: 'center',
+        flexWrap: 'wrap',
+        gap: '2px',
+        rowGap: '6px',
+      }}
+    >
       {groups.map((group) => {
         if (group === 'vip' || group === 'pro') {
           return <Label color='yellow'>{group}</Label>;
@@ -25,7 +33,7 @@ export function renderGroup(group) {
         }
         return <Label>{group}</Label>;
       })}
-    </>
+    </div>
   );
 }
 
@@ -106,8 +114,8 @@ export function renderChannelTip(channelId) {
     return <></>;
   }
   return (
-      <Message>
-        <div dangerouslySetInnerHTML={{__html: channel.tip}}></div>
-      </Message>
+    <Message>
+      <div dangerouslySetInnerHTML={{ __html: channel.tip }}></div>
+    </Message>
   );
 }
@@ -1,7 +1,7 @@
-import { toast } from 'react-toastify';
-import { toastConstants } from '../constants';
+import {toast} from 'react-toastify';
+import {toastConstants} from '../constants';
 import React from 'react';
-import { API } from './api';
+import {API} from './api';
 
 const HTMLToastContent = ({ htmlContent }) => {
   return <div dangerouslySetInnerHTML={{ __html: htmlContent }} />;
@@ -74,6 +74,7 @@ if (isMobile()) {
 }
 
 export function showError(error) {
+  if (!error) return;
   console.error(error);
   if (error.message) {
     if (error.name === 'AxiosError') {
@@ -158,17 +159,7 @@ export function timestamp2string(timestamp) {
     second = '0' + second;
   }
   return (
-    year +
-    '-' +
-    month +
-    '-' +
-    day +
-    ' ' +
-    hour +
-    ':' +
-    minute +
-    ':' +
-    second
+      year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + second
   );
 }
 
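A side note on the hunk above: the JS helper assembles the date string by hand-padding each component. In Go, the project's backend language, the equivalent is a single reference-layout Format call; a standalone sketch with a hypothetical timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := int64(1700000000) // hypothetical Unix timestamp, in seconds
	// Go's reference layout "2006-01-02 15:04:05" produces the same
	// "year-month-day hour:minute:second" string built manually in JS.
	fmt.Println(time.Unix(ts, 0).Format("2006-01-02 15:04:05"))
}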
@@ -193,7 +184,6 @@ export const verifyJSON = (str) => {
 export function shouldShowPrompt(id) {
   let prompt = localStorage.getItem(`prompt-${id}`);
   return !prompt;
-
 }
 
 export function setPromptShown(id) {
@@ -224,4 +214,4 @@ export function getChannelModels(type) {
     return channelModels[type];
   }
   return [];
-}
\ No newline at end of file
+}
@@ -104,8 +104,10 @@
       "model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names",
       "system_prompt": "System Prompt",
       "system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model",
-      "base_url": "Proxy",
-      "base_url_placeholder": "Optional, used for API calls through proxy. Enter proxy address in format: https://domain.com",
+      "proxy_url": "Proxy",
+      "proxy_url_placeholder": "This is optional and used for API calls via a proxy. Please enter the proxy URL, formatted as: https://domain.com",
+      "base_url": "Base URL",
+      "base_url_placeholder": "The Base URL required by the OpenAPI SDK",
       "key": "Key",
       "key_placeholder": "Please enter key",
       "batch": "Batch Create",
@@ -104,8 +104,10 @@
       "model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称",
       "system_prompt": "系统提示词",
       "system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型",
-      "base_url": "代理",
-      "base_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com",
+      "proxy_url": "代理",
+      "proxy_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型",
+      "base_url": "Base URL",
+      "base_url_placeholder": "OpenAPI SDK 中所要求的 Base URL",
       "key": "密钥",
       "key_placeholder": "请输入密钥",
       "batch": "批量创建",
@@ -1,6 +1,6 @@
 import React, {useEffect, useState} from 'react';
 import {useTranslation} from 'react-i18next';
-import {Button, Card, Form, Input, Message,} from 'semantic-ui-react';
+import {Button, Card, Form, Input, Message} from 'semantic-ui-react';
 import {useNavigate, useParams} from 'react-router-dom';
 import {API, copy, getChannelModels, showError, showInfo, showSuccess, verifyJSON,} from '../../helpers';
 import {CHANNEL_OPTIONS} from '../../constants';
@@ -339,6 +339,20 @@ const EditChannel = () => {
             {inputs.type === 8 && (
               <Form.Field>
                 <Form.Input
+                    required
+                    label={t('channel.edit.proxy_url')}
+                    name='base_url'
+                    placeholder={t('channel.edit.proxy_url_placeholder')}
+                    onChange={handleInputChange}
+                    value={inputs.base_url}
+                    autoComplete='new-password'
+                />
+              </Form.Field>
+            )}
+            {inputs.type === 50 && (
+                <Form.Field>
+                  <Form.Input
+                      required
                   label={t('channel.edit.base_url')}
                   name='base_url'
                   placeholder={t('channel.edit.base_url_placeholder')}
@@ -637,12 +651,13 @@ const EditChannel = () => {
             {inputs.type !== 3 &&
               inputs.type !== 33 &&
               inputs.type !== 8 &&
+                inputs.type !== 50 &&
               inputs.type !== 22 && (
                 <Form.Field>
                   <Form.Input
-                    label={t('channel.edit.base_url')}
+                      label={t('channel.edit.proxy_url')}
                     name='base_url'
-                    placeholder={t('channel.edit.base_url_placeholder')}
+                      placeholder={t('channel.edit.proxy_url_placeholder')}
                     onChange={handleInputChange}
                     value={inputs.base_url}
                     autoComplete='new-password'
@@ -1,6 +1,6 @@
-import React, { useEffect, useState } from 'react';
-import { useTranslation } from 'react-i18next';
-import { Card, Grid } from 'semantic-ui-react';
+import React, {useEffect, useState} from 'react';
+import {useTranslation} from 'react-i18next';
+import {Card, Grid} from 'semantic-ui-react';
 import {
   Bar,
   BarChart,
@@ -122,11 +122,11 @@ const Dashboard = () => {
         ? new Date(Math.min(...dates.map((d) => new Date(d))))
         : new Date();
 
-    // 确保至少显示5天的数据
-    const fiveDaysAgo = new Date();
-    fiveDaysAgo.setDate(fiveDaysAgo.getDate() - 4); // -4是因为包含今天
-    if (minDate > fiveDaysAgo) {
-      minDate = fiveDaysAgo;
+    // 确保至少显示7天的数据
+    const sevenDaysAgo = new Date();
+    sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 6); // -6是因为包含今天
+    if (minDate > sevenDaysAgo) {
+      minDate = sevenDaysAgo;
     }
 
     // 生成所有日期
@@ -164,11 +164,11 @@ const Dashboard = () => {
         ? new Date(Math.min(...dates.map((d) => new Date(d))))
         : new Date();
 
-    // 确保至少显示5天的数据
-    const fiveDaysAgo = new Date();
-    fiveDaysAgo.setDate(fiveDaysAgo.getDate() - 4); // -4是因为包含今天
-    if (minDate > fiveDaysAgo) {
-      minDate = fiveDaysAgo;
+    // 确保至少显示7天的数据
+    const sevenDaysAgo = new Date();
+    sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 6); // -6是因为包含今天
+    if (minDate > sevenDaysAgo) {
+      minDate = sevenDaysAgo;
    }
 
     // 生成所有日期
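The two hunks above widen the dashboard's minimum chart window from 5 to 7 days; the offset is 6 rather than 7 because today counts as the first day of the window. A standalone sketch of the same floor logic in Go (the variable names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	today := time.Now()
	// Subtracting 6 days yields a 7-day window because today is day 1.
	sevenDaysAgo := today.AddDate(0, 0, -6)
	minDate := today // assume no usage data yet, so the earliest date defaults to today
	if minDate.After(sevenDaysAgo) {
		minDate = sevenDaysAgo
	}
	fmt.Println("chart window starts at:", minDate.Format("2006-01-02"))
}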