Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-27 11:53:42 +08:00)

Compare commits: v0.6.11-pr ... v0.6.11-pr (30 commits)
| SHA1 |
|---|
| 3f421c4f04 |
| 1ce6a226f6 |
| cafd0a0327 |
| 8b8cd03e85 |
| 54c38de813 |
| d6284bf6b0 |
| df5d2ca93d |
| fef7ae048b |
| 6916debf66 |
| 53da209134 |
| 517f6ad211 |
| 10aba11f18 |
| 4d011c5f98 |
| eb96aa635e |
| c715f2bc1d |
| aed090dd55 |
| 696265774e |
| 974729426d |
| 57c1367ec8 |
| 44233d5c04 |
| bf45a955c3 |
| 20435fcbfc |
| 6e7a1c2323 |
| dd65b997dd |
| 0b6d03d6c6 |
| 4375246e24 |
| 3e3b8230ac |
| 07808122a6 |
| c96895e35b |
| 2552c68249 |

.github/workflows/docker-image.yml (4 lines changed, vendored)

							| @@ -62,9 +62,7 @@ jobs: | ||||
|         uses: docker/build-push-action@v3 | ||||
|         with: | ||||
|           context: . | ||||
|           platforms: ${{ contains(github.ref, 'alpha') && 'linux/amd64' || 'linux/amd64,linux/arm64' }} | ||||
|           platforms: ${{ contains(github.ref, 'alpha') && 'linux/amd64' || 'linux/amd64' }} | ||||
|           push: true | ||||
|           tags: ${{ steps.meta.outputs.tags }} | ||||
|           labels: ${{ steps.meta.outputs.labels }} | ||||
|           build-args: | | ||||
|             TARGETARCH=${{ startsWith(matrix.platform, 'linux/arm64') && 'arm64' || 'amd64' }} | ||||
| @@ -24,8 +24,7 @@ RUN apk add --no-cache \ | ||||
|  | ||||
| ENV GO111MODULE=on \ | ||||
|     CGO_ENABLED=1 \ | ||||
|     GOOS=linux \ | ||||
|     GOARCH=$TARGETARCH | ||||
|     GOOS=linux | ||||
|  | ||||
| WORKDIR /build | ||||
|  | ||||
|   | ||||
| @@ -315,6 +315,7 @@ If the channel ID is not provided, load balancing will be used to distribute the | ||||
| * [FastGPT](https://github.com/labring/FastGPT): Knowledge question answering system based on the LLM | ||||
| * [VChart](https://github.com/VisActor/VChart):  More than just a cross-platform charting library, but also an expressive data storyteller. | ||||
| * [VMind](https://github.com/VisActor/VMind):  Not just automatic, but also fantastic. Open-source solution for intelligent visualization. | ||||
| * [CherryStudio](https://github.com/CherryHQ/cherry-studio): A cross-platform AI client that integrates multiple service providers and supports local knowledge base management. | ||||
|  | ||||
| ## Note | ||||
| This project is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**. It must not be used for illegal purposes. | ||||
|   | ||||
| @@ -287,8 +287,8 @@ graph LR | ||||
|     + インターフェイスアドレスと API Key が正しいか再確認してください。 | ||||
|  | ||||
| ## 関連プロジェクト | ||||
| [FastGPT](https://github.com/labring/FastGPT): LLM に基づく知識質問応答システム | ||||
|  | ||||
| * [FastGPT](https://github.com/labring/FastGPT): LLM に基づく知識質問応答システム | ||||
| * [CherryStudio](https://github.com/CherryHQ/cherry-studio):  マルチプラットフォーム対応のAIクライアント。複数のサービスプロバイダーを統合管理し、ローカル知識ベースをサポートします。 | ||||
| ## 注 | ||||
| 本プロジェクトはオープンソースプロジェクトです。OpenAI の[利用規約](https://openai.com/policies/terms-of-use)および**適用される法令**を遵守してご利用ください。違法な目的での利用はご遠慮ください。 | ||||
|  | ||||
|   | ||||
| @@ -115,7 +115,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用  | ||||
| 19. 支持丰富的**自定义**设置, | ||||
|     1. 支持自定义系统名称,logo 以及页脚。 | ||||
|     2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。 | ||||
| 20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。。 | ||||
| 20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。 | ||||
| 21. 支持 Cloudflare Turnstile 用户校验。 | ||||
| 22. 支持用户管理,支持**多种用户登录注册方式**: | ||||
|     + 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。 | ||||
| @@ -469,6 +469,7 @@ https://openai.justsong.cn | ||||
| * [ChatGPT Next Web](https://github.com/Yidadaa/ChatGPT-Next-Web):  一键拥有你自己的跨平台 ChatGPT 应用 | ||||
| * [VChart](https://github.com/VisActor/VChart):  不只是开箱即用的多端图表库,更是生动灵活的数据故事讲述者。 | ||||
| * [VMind](https://github.com/VisActor/VMind):  不仅自动,还很智能。开源智能可视化解决方案。 | ||||
| * [CherryStudio](https://github.com/CherryHQ/cherry-studio):  全平台支持的AI客户端, 多服务商集成管理、本地知识库支持。 | ||||
|  | ||||
| ## 注意 | ||||
|  | ||||
|   | ||||
| @@ -163,4 +163,4 @@ var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "") | ||||
| var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30) | ||||
|  | ||||
| var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false) | ||||
| var TestPrompt = env.String("TEST_PROMPT", "Print your model name exactly and do not output without any other text.") | ||||
| var TestPrompt = env.String("TEST_PROMPT", "Output only your specific model name with no additional text.") | ||||
|   | ||||
| @@ -93,6 +93,9 @@ func Error(ctx context.Context, msg string) { | ||||
| } | ||||
|  | ||||
| func Debugf(ctx context.Context, format string, a ...any) { | ||||
| 	if !config.DebugEnabled { | ||||
| 		return | ||||
| 	} | ||||
| 	logHelper(ctx, loggerDEBUG, fmt.Sprintf(format, a...)) | ||||
| } | ||||
|  | ||||
|   | ||||

common/utils/array.go (new file, 13 lines)

| @@ -0,0 +1,13 @@ | ||||
| package utils | ||||
|  | ||||
| func DeDuplication(slice []string) []string { | ||||
| 	m := make(map[string]bool) | ||||
| 	for _, v := range slice { | ||||
| 		m[v] = true | ||||
| 	} | ||||
| 	result := make([]string, 0, len(m)) | ||||
| 	for v := range m { | ||||
| 		result = append(result, v) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
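
A minimal usage sketch for the new `DeDuplication` helper (the example values are hypothetical). Duplicates are collapsed through a map, so the order of the returned slice is not guaranteed; further down in this diff, `AddAbilities` applies it to `channel.Models` so a repeated model name no longer produces duplicate ability rows.

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/utils"
)

func main() {
	models := []string{"gpt-4o", "gpt-4o", "gpt-4o-mini", "gpt-4o"}
	// Duplicates are removed via a map, so the result order is unspecified.
	fmt.Println(utils.DeDuplication(models)) // e.g. [gpt-4o gpt-4o-mini]
}
```
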
| @@ -112,6 +112,13 @@ type DeepSeekUsageResponse struct { | ||||
| 	} `json:"balance_infos"` | ||||
| } | ||||
|  | ||||
| type OpenRouterResponse struct { | ||||
| 	Data struct { | ||||
| 		TotalCredits float64 `json:"total_credits"` | ||||
| 		TotalUsage   float64 `json:"total_usage"` | ||||
| 	} `json:"data"` | ||||
| } | ||||
|  | ||||
| // GetAuthHeader get auth header | ||||
| func GetAuthHeader(token string) http.Header { | ||||
| 	h := http.Header{} | ||||
| @@ -285,6 +292,22 @@ func updateChannelDeepSeekBalance(channel *model.Channel) (float64, error) { | ||||
| 	return balance, nil | ||||
| } | ||||
|  | ||||
| func updateChannelOpenRouterBalance(channel *model.Channel) (float64, error) { | ||||
| 	url := "https://openrouter.ai/api/v1/credits" | ||||
| 	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key)) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	response := OpenRouterResponse{} | ||||
| 	err = json.Unmarshal(body, &response) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	balance := response.Data.TotalCredits - response.Data.TotalUsage | ||||
| 	channel.UpdateBalance(balance) | ||||
| 	return balance, nil | ||||
| } | ||||
|  | ||||
| func updateChannelBalance(channel *model.Channel) (float64, error) { | ||||
| 	baseURL := channeltype.ChannelBaseURLs[channel.Type] | ||||
| 	if channel.GetBaseURL() == "" { | ||||
| @@ -313,6 +336,8 @@ func updateChannelBalance(channel *model.Channel) (float64, error) { | ||||
| 		return updateChannelSiliconFlowBalance(channel) | ||||
| 	case channeltype.DeepSeek: | ||||
| 		return updateChannelDeepSeekBalance(channel) | ||||
| 	case channeltype.OpenRouter: | ||||
| 		return updateChannelOpenRouterBalance(channel) | ||||
| 	default: | ||||
| 		return 0, errors.New("尚未实现") | ||||
| 	} | ||||
|   | ||||
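
For reference, a standalone sketch of the balance lookup added in `updateChannelOpenRouterBalance`: the remaining balance is `total_credits - total_usage` from OpenRouter's `/api/v1/credits` response. The struct mirrors the `OpenRouterResponse` type above; the Bearer-token header and the key value are assumptions made for the sketch, not code taken from the repository.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors the OpenRouterResponse struct introduced in this commit.
type creditsResponse struct {
	Data struct {
		TotalCredits float64 `json:"total_credits"`
		TotalUsage   float64 `json:"total_usage"`
	} `json:"data"`
}

func fetchOpenRouterBalance(apiKey string) (float64, error) {
	req, err := http.NewRequest("GET", "https://openrouter.ai/api/v1/credits", nil)
	if err != nil {
		return 0, err
	}
	req.Header.Set("Authorization", "Bearer "+apiKey) // assumed auth scheme
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var cr creditsResponse
	if err := json.NewDecoder(resp.Body).Decode(&cr); err != nil {
		return 0, err
	}
	// Remaining balance = purchased credits minus consumed usage.
	return cr.Data.TotalCredits - cr.Data.TotalUsage, nil
}

func main() {
	balance, err := fetchOpenRouterBalance("sk-or-placeholder")
	fmt.Println(balance, err)
}
```
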
| @@ -153,6 +153,7 @@ func testChannel(ctx context.Context, channel *model.Channel, request *relaymode | ||||
| 	rawResponse := w.Body.String() | ||||
| 	_, responseMessage, err = parseTestResponse(rawResponse) | ||||
| 	if err != nil { | ||||
| 		logger.SysError(fmt.Sprintf("failed to parse error: %s, \nresponse: %s", err.Error(), rawResponse)) | ||||
| 		return "", err, nil | ||||
| 	} | ||||
| 	result := w.Result() | ||||
|   | ||||
| @@ -2,10 +2,13 @@ package model | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"github.com/songquanpeng/one-api/common" | ||||
| 	"gorm.io/gorm" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
|  | ||||
| 	"gorm.io/gorm" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/common" | ||||
| 	"github.com/songquanpeng/one-api/common/utils" | ||||
| ) | ||||
|  | ||||
| type Ability struct { | ||||
| @@ -49,6 +52,7 @@ func GetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority b | ||||
|  | ||||
| func (channel *Channel) AddAbilities() error { | ||||
| 	models_ := strings.Split(channel.Models, ",") | ||||
| 	models_ = utils.DeDuplication(models_) | ||||
| 	groups_ := strings.Split(channel.Group, ",") | ||||
| 	abilities := make([]Ability, 0, len(models_)) | ||||
| 	for _, model := range models_ { | ||||
|   | ||||

relay/adaptor/alibailian/constants.go (new file, 20 lines)

| @@ -0,0 +1,20 @@ | ||||
| package alibailian | ||||
|  | ||||
| // https://help.aliyun.com/zh/model-studio/getting-started/models | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"qwen-turbo", | ||||
| 	"qwen-plus", | ||||
| 	"qwen-long", | ||||
| 	"qwen-max", | ||||
| 	"qwen-coder-plus", | ||||
| 	"qwen-coder-plus-latest", | ||||
| 	"qwen-coder-turbo", | ||||
| 	"qwen-coder-turbo-latest", | ||||
| 	"qwen-mt-plus", | ||||
| 	"qwen-mt-turbo", | ||||
| 	"qwq-32b-preview", | ||||
|  | ||||
| 	"deepseek-r1", | ||||
| 	"deepseek-v3", | ||||
| } | ||||

relay/adaptor/alibailian/main.go (new file, 19 lines)

| @@ -0,0 +1,19 @@ | ||||
| package alibailian | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/relay/meta" | ||||
| 	"github.com/songquanpeng/one-api/relay/relaymode" | ||||
| ) | ||||
|  | ||||
| func GetRequestURL(meta *meta.Meta) (string, error) { | ||||
| 	switch meta.Mode { | ||||
| 	case relaymode.ChatCompletions: | ||||
| 		return fmt.Sprintf("%s/compatible-mode/v1/chat/completions", meta.BaseURL), nil | ||||
| 	case relaymode.Embeddings: | ||||
| 		return fmt.Sprintf("%s/compatible-mode/v1/embeddings", meta.BaseURL), nil | ||||
| 	default: | ||||
| 	} | ||||
| 	return "", fmt.Errorf("unsupported relay mode %d for ali bailian", meta.Mode) | ||||
| } | ||||

relay/adaptor/baiduv2/constants.go (new file, 30 lines)

| @@ -0,0 +1,30 @@ | ||||
| package baiduv2 | ||||
|  | ||||
| // https://console.bce.baidu.com/support/?_=1692863460488×tamp=1739074632076#/api?product=QIANFAN&project=%E5%8D%83%E5%B8%86ModelBuilder&parent=%E5%AF%B9%E8%AF%9DChat%20V2&api=v2%2Fchat%2Fcompletions&method=post | ||||
| // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Fm2vrveyu#%E6%94%AF%E6%8C%81%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8 | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"ernie-4.0-8k-latest", | ||||
| 	"ernie-4.0-8k-preview", | ||||
| 	"ernie-4.0-8k", | ||||
| 	"ernie-4.0-turbo-8k-latest", | ||||
| 	"ernie-4.0-turbo-8k-preview", | ||||
| 	"ernie-4.0-turbo-8k", | ||||
| 	"ernie-4.0-turbo-128k", | ||||
| 	"ernie-3.5-8k-preview", | ||||
| 	"ernie-3.5-8k", | ||||
| 	"ernie-3.5-128k", | ||||
| 	"ernie-speed-8k", | ||||
| 	"ernie-speed-128k", | ||||
| 	"ernie-speed-pro-128k", | ||||
| 	"ernie-lite-8k", | ||||
| 	"ernie-lite-pro-128k", | ||||
| 	"ernie-tiny-8k", | ||||
| 	"ernie-char-8k", | ||||
| 	"ernie-char-fiction-8k", | ||||
| 	"ernie-novel-8k", | ||||
| 	"deepseek-v3", | ||||
| 	"deepseek-r1", | ||||
| 	"deepseek-r1-distill-qwen-32b", | ||||
| 	"deepseek-r1-distill-qwen-14b", | ||||
| } | ||||

relay/adaptor/baiduv2/main.go (new file, 17 lines)

| @@ -0,0 +1,17 @@ | ||||
| package baiduv2 | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/relay/meta" | ||||
| 	"github.com/songquanpeng/one-api/relay/relaymode" | ||||
| ) | ||||
|  | ||||
| func GetRequestURL(meta *meta.Meta) (string, error) { | ||||
| 	switch meta.Mode { | ||||
| 	case relaymode.ChatCompletions: | ||||
| 		return fmt.Sprintf("%s/v2/chat/completions", meta.BaseURL), nil | ||||
| 	default: | ||||
| 	} | ||||
| 	return "", fmt.Errorf("unsupported relay mode %d for baidu v2", meta.Mode) | ||||
| } | ||||
| @@ -5,9 +5,10 @@ import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/gin-gonic/gin" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/common/config" | ||||
| 	"github.com/songquanpeng/one-api/common/helper" | ||||
| 	channelhelper "github.com/songquanpeng/one-api/relay/adaptor" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/openai" | ||||
| @@ -20,17 +21,12 @@ type Adaptor struct { | ||||
| } | ||||
|  | ||||
| func (a *Adaptor) Init(meta *meta.Meta) { | ||||
|  | ||||
| } | ||||
|  | ||||
| func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { | ||||
| 	var defaultVersion string | ||||
| 	switch meta.ActualModelName { | ||||
| 	case "gemini-2.0-flash-exp", | ||||
| 		"gemini-2.0-flash-thinking-exp", | ||||
| 		"gemini-2.0-flash-thinking-exp-01-21": | ||||
| 		defaultVersion = "v1beta" | ||||
| 	default: | ||||
| 	defaultVersion := config.GeminiVersion | ||||
| 	if strings.Contains(meta.ActualModelName, "gemini-2.0") || | ||||
| 		strings.Contains(meta.ActualModelName, "gemini-1.5") { | ||||
| 		defaultVersion = "v1beta" | ||||
| 	} | ||||
|  | ||||
|   | ||||
| @@ -1,11 +1,35 @@ | ||||
| package gemini | ||||
|  | ||||
| import ( | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2" | ||||
| ) | ||||
|  | ||||
| // https://ai.google.dev/models/gemini | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"gemini-pro", "gemini-1.0-pro", | ||||
| 	"gemini-1.5-flash", "gemini-1.5-pro", | ||||
| 	"text-embedding-004", "aqa", | ||||
| 	"gemini-2.0-flash-exp", | ||||
| 	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21", | ||||
| var ModelList = geminiv2.ModelList | ||||
|  | ||||
| // ModelsSupportSystemInstruction is the list of models that support system instruction. | ||||
| // | ||||
| // https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions | ||||
| var ModelsSupportSystemInstruction = []string{ | ||||
| 	// "gemini-1.0-pro-002", | ||||
| 	// "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002", | ||||
| 	// "gemini-1.5-flash-8b", | ||||
| 	// "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002", | ||||
| 	// "gemini-1.5-pro-experimental", | ||||
| 	"gemini-2.0-flash", "gemini-2.0-flash-exp", | ||||
| 	"gemini-2.0-flash-thinking-exp-01-21", | ||||
| } | ||||
|  | ||||
| // IsModelSupportSystemInstruction check if the model support system instruction. | ||||
| // | ||||
| // Because the main version of Go is 1.20, slice.Contains cannot be used | ||||
| func IsModelSupportSystemInstruction(model string) bool { | ||||
| 	for _, m := range ModelsSupportSystemInstruction { | ||||
| 		if m == model { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|   | ||||
| @@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest { | ||||
| 		} | ||||
| 		// Converting system prompt to prompt from user for the same reason | ||||
| 		if content.Role == "system" { | ||||
| 			content.Role = "user" | ||||
| 			shouldAddDummyModelMessage = true | ||||
| 			if IsModelSupportSystemInstruction(textRequest.Model) { | ||||
| 				geminiRequest.SystemInstruction = &content | ||||
| 				geminiRequest.SystemInstruction.Role = "" | ||||
| 				continue | ||||
| 			} else { | ||||
| 				content.Role = "user" | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		geminiRequest.Contents = append(geminiRequest.Contents, content) | ||||
|  | ||||
| 		// If a system message is the last message, we need to add a dummy model message to make gemini happy | ||||
|   | ||||
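
To illustrate the new branch: for models listed in `ModelsSupportSystemInstruction`, an OpenAI-style system message is lifted into the request-level `system_instruction` field (with its role cleared) instead of being rewritten as a user message. A minimal sketch of that mapping, using simplified stand-in types rather than the adaptor's real ones:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the gemini adaptor's ChatContent/ChatRequest types.
type part struct {
	Text string `json:"text"`
}

type content struct {
	Role  string `json:"role,omitempty"`
	Parts []part `json:"parts"`
}

type chatRequest struct {
	Contents          []content `json:"contents"`
	SystemInstruction *content  `json:"system_instruction,omitempty"`
}

func main() {
	sys := content{Role: "system", Parts: []part{{Text: "You are a terse assistant."}}}
	user := content{Role: "user", Parts: []part{{Text: "Hello"}}}

	req := chatRequest{Contents: []content{user}}
	// Supported model: clear the role and move the content to system_instruction
	// (unsupported models keep the old behaviour and the role becomes "user").
	sys.Role = ""
	req.SystemInstruction = &sys

	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}
```
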
| @@ -1,10 +1,11 @@ | ||||
| package gemini | ||||
|  | ||||
| type ChatRequest struct { | ||||
| 	Contents         []ChatContent        `json:"contents"` | ||||
| 	SafetySettings   []ChatSafetySettings `json:"safety_settings,omitempty"` | ||||
| 	GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"` | ||||
| 	Tools            []ChatTools          `json:"tools,omitempty"` | ||||
| 	Contents          []ChatContent        `json:"contents"` | ||||
| 	SafetySettings    []ChatSafetySettings `json:"safety_settings,omitempty"` | ||||
| 	GenerationConfig  ChatGenerationConfig `json:"generation_config,omitempty"` | ||||
| 	Tools             []ChatTools          `json:"tools,omitempty"` | ||||
| 	SystemInstruction *ChatContent         `json:"system_instruction,omitempty"` | ||||
| } | ||||
|  | ||||
| type EmbeddingRequest struct { | ||||
|   | ||||

relay/adaptor/geminiv2/constants.go (new file, 15 lines)

| @@ -0,0 +1,15 @@ | ||||
| package geminiv2 | ||||
|  | ||||
| // https://ai.google.dev/models/gemini | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"gemini-pro", "gemini-1.0-pro", | ||||
| 	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it", | ||||
| 	"gemini-1.5-flash", "gemini-1.5-flash-8b", | ||||
| 	"gemini-1.5-pro", "gemini-1.5-pro-experimental", | ||||
| 	"text-embedding-004", "aqa", | ||||
| 	"gemini-2.0-flash", "gemini-2.0-flash-exp", | ||||
| 	"gemini-2.0-flash-lite-preview-02-05", | ||||
| 	"gemini-2.0-flash-thinking-exp-01-21", | ||||
| 	"gemini-2.0-pro-exp-02-05", | ||||
| } | ||||

relay/adaptor/geminiv2/main.go (new file, 14 lines)

| @@ -0,0 +1,14 @@ | ||||
| package geminiv2 | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/relay/meta" | ||||
| ) | ||||
|  | ||||
| func GetRequestURL(meta *meta.Meta) (string, error) { | ||||
| 	baseURL := strings.TrimSuffix(meta.BaseURL, "/") | ||||
| 	requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1") | ||||
| 	return fmt.Sprintf("%s%s", baseURL, requestPath), nil | ||||
| } | ||||
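
A worked example of the geminiv2 URL rewrite: the trailing `/` is trimmed from the base URL and the `/v1` prefix is trimmed from the incoming path, so an OpenAI-format request is routed to Gemini's OpenAI-compatible endpoint. The base URL below is the one registered for channel 51 later in this diff; the helper is a standalone copy of the same string handling, not the adaptor call itself.

```go
package main

import (
	"fmt"
	"strings"
)

// Same string handling as geminiv2.GetRequestURL, copied for illustration.
func geminiV2URL(baseURL, requestPath string) string {
	base := strings.TrimSuffix(baseURL, "/")
	path := strings.TrimPrefix(requestPath, "/v1")
	return fmt.Sprintf("%s%s", base, path)
}

func main() {
	fmt.Println(geminiV2URL(
		"https://generativelanguage.googleapis.com/v1beta/openai/",
		"/v1/chat/completions",
	))
	// Output: https://generativelanguage.googleapis.com/v1beta/openai/chat/completions
}
```
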
| @@ -8,4 +8,6 @@ var ModelList = []string{ | ||||
| 	"abab6-chat", | ||||
| 	"abab5.5-chat", | ||||
| 	"abab5.5s-chat", | ||||
| 	"MiniMax-VL-01", | ||||
| 	"MiniMax-Text-01", | ||||
| } | ||||
|   | ||||
| @@ -8,8 +8,12 @@ import ( | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/gin-gonic/gin" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/alibailian" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/doubao" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/minimax" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/novita" | ||||
| 	"github.com/songquanpeng/one-api/relay/channeltype" | ||||
| @@ -52,6 +56,12 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { | ||||
| 		return doubao.GetRequestURL(meta) | ||||
| 	case channeltype.Novita: | ||||
| 		return novita.GetRequestURL(meta) | ||||
| 	case channeltype.BaiduV2: | ||||
| 		return baiduv2.GetRequestURL(meta) | ||||
| 	case channeltype.AliBailian: | ||||
| 		return alibailian.GetRequestURL(meta) | ||||
| 	case channeltype.GeminiOpenAICompatible: | ||||
| 		return geminiv2.GetRequestURL(meta) | ||||
| 	default: | ||||
| 		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil | ||||
| 	} | ||||
|   | ||||
| @@ -2,19 +2,24 @@ package openai | ||||
|  | ||||
| import ( | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/ai360" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/alibailian" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/baichuan" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/deepseek" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/doubao" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/groq" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/minimax" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/mistral" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/novita" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/openrouter" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/xai" | ||||
| 	"github.com/songquanpeng/one-api/relay/adaptor/xunfeiv2" | ||||
| 	"github.com/songquanpeng/one-api/relay/channeltype" | ||||
| ) | ||||
|  | ||||
| @@ -34,6 +39,8 @@ var CompatibleChannels = []int{ | ||||
| 	channeltype.Novita, | ||||
| 	channeltype.SiliconFlow, | ||||
| 	channeltype.XAI, | ||||
| 	channeltype.BaiduV2, | ||||
| 	channeltype.XunfeiV2, | ||||
| } | ||||
|  | ||||
| func GetCompatibleChannelMeta(channelType int) (string, []string) { | ||||
| @@ -68,6 +75,16 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) { | ||||
| 		return "siliconflow", siliconflow.ModelList | ||||
| 	case channeltype.XAI: | ||||
| 		return "xai", xai.ModelList | ||||
| 	case channeltype.BaiduV2: | ||||
| 		return "baiduv2", baiduv2.ModelList | ||||
| 	case channeltype.XunfeiV2: | ||||
| 		return "xunfeiv2", xunfeiv2.ModelList | ||||
| 	case channeltype.OpenRouter: | ||||
| 		return "openrouter", openrouter.ModelList | ||||
| 	case channeltype.AliBailian: | ||||
| 		return "alibailian", alibailian.ModelList | ||||
| 	case channeltype.GeminiOpenAICompatible: | ||||
| 		return "geminiv2", geminiv2.ModelList | ||||
| 	default: | ||||
| 		return "openai", ModelList | ||||
| 	} | ||||
|   | ||||
| @@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int) | ||||
| } | ||||
|  | ||||
| func GetFullRequestURL(baseURL string, requestURL string, channelType int) string { | ||||
| 	if channelType == channeltype.OpenAICompatible { | ||||
| 		return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1")) | ||||
| 	} | ||||
| 	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL) | ||||
|  | ||||
| 	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") { | ||||
|   | ||||

relay/adaptor/openrouter/constants.go (new file, 20 lines)

| @@ -0,0 +1,20 @@ | ||||
| package openrouter | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"openai/gpt-3.5-turbo", | ||||
| 	"openai/chatgpt-4o-latest", | ||||
| 	"openai/o1", | ||||
| 	"openai/o1-preview", | ||||
| 	"openai/o1-mini", | ||||
| 	"openai/o3-mini", | ||||
| 	"google/gemini-2.0-flash-001", | ||||
| 	"google/gemini-2.0-flash-thinking-exp:free", | ||||
| 	"google/gemini-2.0-flash-lite-preview-02-05:free", | ||||
| 	"google/gemini-2.0-pro-exp-02-05:free", | ||||
| 	"google/gemini-flash-1.5-8b", | ||||
| 	"anthropic/claude-3.5-sonnet", | ||||
| 	"anthropic/claude-3.5-haiku", | ||||
| 	"deepseek/deepseek-r1:free", | ||||
| 	"deepseek/deepseek-r1", | ||||
| 	"qwen/qwen-vl-plus:free", | ||||
| } | ||||
| @@ -16,10 +16,12 @@ import ( | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"gemini-pro", "gemini-pro-vision", | ||||
| 	"gemini-1.5-pro-001", "gemini-1.5-flash-001", | ||||
| 	"gemini-1.5-pro-002", "gemini-1.5-flash-002", | ||||
| 	"gemini-2.0-flash-exp", | ||||
| 	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21", | ||||
| 	"gemini-exp-1206", | ||||
| 	"gemini-1.5-pro-001", "gemini-1.5-pro-002", | ||||
| 	"gemini-1.5-flash-001", "gemini-1.5-flash-002", | ||||
| 	"gemini-2.0-flash-exp", "gemini-2.0-flash-001", | ||||
| 	"gemini-2.0-flash-lite-preview-02-05", | ||||
| 	"gemini-2.0-flash-thinking-exp-01-21", | ||||
| } | ||||
|  | ||||
| type Adaptor struct { | ||||
|   | ||||
| @@ -1,5 +1,14 @@ | ||||
| package xai | ||||
|  | ||||
| //https://console.x.ai/ | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"grok-2", | ||||
| 	"grok-vision-beta", | ||||
| 	"grok-2-vision-1212", | ||||
| 	"grok-2-vision", | ||||
| 	"grok-2-vision-latest", | ||||
| 	"grok-2-1212", | ||||
| 	"grok-2-latest", | ||||
| 	"grok-beta", | ||||
| } | ||||
|   | ||||
| @@ -1,12 +1,10 @@ | ||||
| package xunfei | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"SparkDesk", | ||||
| 	"SparkDesk-v1.1", | ||||
| 	"SparkDesk-v2.1", | ||||
| 	"SparkDesk-v3.1", | ||||
| 	"SparkDesk-v3.1-128K", | ||||
| 	"SparkDesk-v3.5", | ||||
| 	"SparkDesk-v3.5-32K", | ||||
| 	"SparkDesk-v4.0", | ||||
| 	"Spark-Lite", | ||||
| 	"Spark-Pro", | ||||
| 	"Spark-Pro-128K", | ||||
| 	"Spark-Max", | ||||
| 	"Spark-Max-32K", | ||||
| 	"Spark-4.0-Ultra", | ||||
| } | ||||
|   | ||||

relay/adaptor/xunfei/domain.go (new file, 97 lines)

| @@ -0,0 +1,97 @@ | ||||
| package xunfei | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E | ||||
|  | ||||
| //Spark4.0 Ultra 请求地址,对应的domain参数为4.0Ultra: | ||||
| // | ||||
| //wss://spark-api.xf-yun.com/v4.0/chat | ||||
| //Spark Max-32K请求地址,对应的domain参数为max-32k | ||||
| // | ||||
| //wss://spark-api.xf-yun.com/chat/max-32k | ||||
| //Spark Max请求地址,对应的domain参数为generalv3.5 | ||||
| // | ||||
| //wss://spark-api.xf-yun.com/v3.5/chat | ||||
| //Spark Pro-128K请求地址,对应的domain参数为pro-128k: | ||||
| // | ||||
| // wss://spark-api.xf-yun.com/chat/pro-128k | ||||
| //Spark Pro请求地址,对应的domain参数为generalv3: | ||||
| // | ||||
| //wss://spark-api.xf-yun.com/v3.1/chat | ||||
| //Spark Lite请求地址,对应的domain参数为lite: | ||||
| // | ||||
| //wss://spark-api.xf-yun.com/v1.1/chat | ||||
|  | ||||
| // Lite、Pro、Pro-128K、Max、Max-32K和4.0 Ultra | ||||
|  | ||||
| func parseAPIVersionByModelName(modelName string) string { | ||||
| 	apiVersion := modelName2APIVersion(modelName) | ||||
| 	if apiVersion != "" { | ||||
| 		return apiVersion | ||||
| 	} | ||||
|  | ||||
| 	index := strings.IndexAny(modelName, "-") | ||||
| 	if index != -1 { | ||||
| 		return modelName[index+1:] | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func modelName2APIVersion(modelName string) string { | ||||
| 	switch modelName { | ||||
| 	case "Spark-Lite": | ||||
| 		return "v1.1" | ||||
| 	case "Spark-Pro": | ||||
| 		return "v3.1" | ||||
| 	case "Spark-Pro-128K": | ||||
| 		return "v3.1-128K" | ||||
| 	case "Spark-Max": | ||||
| 		return "v3.5" | ||||
| 	case "Spark-Max-32K": | ||||
| 		return "v3.5-32K" | ||||
| 	case "Spark-4.0-Ultra": | ||||
| 		return "v4.0" | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E | ||||
| func apiVersion2domain(apiVersion string) string { | ||||
| 	switch apiVersion { | ||||
| 	case "v1.1": | ||||
| 		return "lite" | ||||
| 	case "v2.1": | ||||
| 		return "generalv2" | ||||
| 	case "v3.1": | ||||
| 		return "generalv3" | ||||
| 	case "v3.1-128K": | ||||
| 		return "pro-128k" | ||||
| 	case "v3.5": | ||||
| 		return "generalv3.5" | ||||
| 	case "v3.5-32K": | ||||
| 		return "max-32k" | ||||
| 	case "v4.0": | ||||
| 		return "4.0Ultra" | ||||
| 	} | ||||
| 	return "general" + apiVersion | ||||
| } | ||||
|  | ||||
| func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (string, string) { | ||||
| 	var authUrl string | ||||
| 	domain := apiVersion2domain(apiVersion) | ||||
| 	switch apiVersion { | ||||
| 	case "v3.1-128K": | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret) | ||||
| 		break | ||||
| 	case "v3.5-32K": | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret) | ||||
| 		break | ||||
| 	default: | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret) | ||||
| 	} | ||||
| 	return domain, authUrl | ||||
| } | ||||
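
The renamed Spark models map first to an API version and then to the `domain` parameter through the two switch tables above (with `v3.1-128K` and `v3.5-32K` additionally routed to the dedicated `/chat/pro-128k` and `/chat/max-32k` websocket paths). A condensed sketch of that lookup chain for a few models:

```go
package main

import "fmt"

// Condensed copy of modelName2APIVersion and apiVersion2domain from domain.go.
var modelToVersion = map[string]string{
	"Spark-Lite":      "v1.1",
	"Spark-Pro":       "v3.1",
	"Spark-Pro-128K":  "v3.1-128K",
	"Spark-Max":       "v3.5",
	"Spark-Max-32K":   "v3.5-32K",
	"Spark-4.0-Ultra": "v4.0",
}

var versionToDomain = map[string]string{
	"v1.1":      "lite",
	"v3.1":      "generalv3",
	"v3.1-128K": "pro-128k",
	"v3.5":      "generalv3.5",
	"v3.5-32K":  "max-32k",
	"v4.0":      "4.0Ultra",
}

func main() {
	for _, m := range []string{"Spark-Lite", "Spark-Max-32K", "Spark-4.0-Ultra"} {
		v := modelToVersion[m]
		fmt.Printf("%s -> version %s -> domain %s\n", m, v, versionToDomain[v])
	}
}
```
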
| @@ -15,6 +15,7 @@ import ( | ||||
|  | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"github.com/gorilla/websocket" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/common" | ||||
| 	"github.com/songquanpeng/one-api/common/helper" | ||||
| 	"github.com/songquanpeng/one-api/common/logger" | ||||
| @@ -270,48 +271,3 @@ func xunfeiMakeRequest(textRequest model.GeneralOpenAIRequest, domain, authUrl, | ||||
|  | ||||
| 	return dataChan, stopChan, nil | ||||
| } | ||||
|  | ||||
| func parseAPIVersionByModelName(modelName string) string { | ||||
| 	index := strings.IndexAny(modelName, "-") | ||||
| 	if index != -1 { | ||||
| 		return modelName[index+1:] | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E | ||||
| func apiVersion2domain(apiVersion string) string { | ||||
| 	switch apiVersion { | ||||
| 	case "v1.1": | ||||
| 		return "lite" | ||||
| 	case "v2.1": | ||||
| 		return "generalv2" | ||||
| 	case "v3.1": | ||||
| 		return "generalv3" | ||||
| 	case "v3.1-128K": | ||||
| 		return "pro-128k" | ||||
| 	case "v3.5": | ||||
| 		return "generalv3.5" | ||||
| 	case "v3.5-32K": | ||||
| 		return "max-32k" | ||||
| 	case "v4.0": | ||||
| 		return "4.0Ultra" | ||||
| 	} | ||||
| 	return "general" + apiVersion | ||||
| } | ||||
|  | ||||
| func getXunfeiAuthUrl(apiVersion string, apiKey string, apiSecret string) (string, string) { | ||||
| 	var authUrl string | ||||
| 	domain := apiVersion2domain(apiVersion) | ||||
| 	switch apiVersion { | ||||
| 	case "v3.1-128K": | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/pro-128k"), apiKey, apiSecret) | ||||
| 		break | ||||
| 	case "v3.5-32K": | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/chat/max-32k"), apiKey, apiSecret) | ||||
| 		break | ||||
| 	default: | ||||
| 		authUrl = buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret) | ||||
| 	} | ||||
| 	return domain, authUrl | ||||
| } | ||||
|   | ||||

relay/adaptor/xunfeiv2/constants.go (new file, 12 lines)

| @@ -0,0 +1,12 @@ | ||||
| package xunfeiv2 | ||||
|  | ||||
| // https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_3-%E8%AF%B7%E6%B1%82%E8%AF%B4%E6%98%8E | ||||
|  | ||||
| var ModelList = []string{ | ||||
| 	"lite", | ||||
| 	"generalv3", | ||||
| 	"pro-128k", | ||||
| 	"generalv3.5", | ||||
| 	"max-32k", | ||||
| 	"4.0Ultra", | ||||
| } | ||||
| @@ -3,8 +3,10 @@ package ratio | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"github.com/songquanpeng/one-api/common/logger" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| var groupRatioLock sync.RWMutex | ||||
| var GroupRatio = map[string]float64{ | ||||
| 	"default": 1, | ||||
| 	"vip":     1, | ||||
| @@ -20,11 +22,15 @@ func GroupRatio2JSONString() string { | ||||
| } | ||||
|  | ||||
| func UpdateGroupRatioByJSONString(jsonStr string) error { | ||||
| 	groupRatioLock.Lock() | ||||
| 	defer groupRatioLock.Unlock() | ||||
| 	GroupRatio = make(map[string]float64) | ||||
| 	return json.Unmarshal([]byte(jsonStr), &GroupRatio) | ||||
| } | ||||
|  | ||||
| func GetGroupRatio(name string) float64 { | ||||
| 	groupRatioLock.RLock() | ||||
| 	defer groupRatioLock.RUnlock() | ||||
| 	ratio, ok := GroupRatio[name] | ||||
| 	if !ok { | ||||
| 		logger.SysError("group ratio not found: " + name) | ||||
|   | ||||
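
Context for the new `groupRatioLock`: `GroupRatio` is rebuilt wholesale when options are updated at runtime while request handlers read it concurrently, and unsynchronized concurrent map access panics in Go. A minimal sketch of the read/write pattern the lock enforces (illustrative names, not the package's full API):

```go
package main

import (
	"encoding/json"
	"sync"
)

var (
	ratioLock sync.RWMutex
	ratios    = map[string]float64{"default": 1}
)

// Writer: rebuilds the map from a JSON option value under the write lock.
func updateRatios(jsonStr string) error {
	ratioLock.Lock()
	defer ratioLock.Unlock()
	ratios = make(map[string]float64)
	return json.Unmarshal([]byte(jsonStr), &ratios)
}

// Reader: many request goroutines can hold the read lock at once.
func getRatio(name string) float64 {
	ratioLock.RLock()
	defer ratioLock.RUnlock()
	if r, ok := ratios[name]; ok {
		return r
	}
	return 1
}

func main() {
	_ = updateRatios(`{"default": 1, "vip": 0.5}`)
	_ = getRatio("vip")
}
```
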
| @@ -4,6 +4,7 @@ import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/songquanpeng/one-api/common/logger" | ||||
| ) | ||||
| @@ -15,6 +16,8 @@ const ( | ||||
| 	RMB       = USD / USD2RMB | ||||
| ) | ||||
|  | ||||
| var modelRatioLock sync.RWMutex | ||||
|  | ||||
| // ModelRatio | ||||
| // https://platform.openai.com/docs/models/model-endpoint-compatibility | ||||
| // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf | ||||
| @@ -92,7 +95,7 @@ var ModelRatio = map[string]float64{ | ||||
| 	"claude-3-sonnet-20240229":   3.0 / 1000 * USD, | ||||
| 	"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD, | ||||
| 	"claude-3-5-sonnet-20241022": 3.0 / 1000 * USD, | ||||
| 	"claude-3-5-sonnet-latest"  : 3.0 / 1000 * USD,	 | ||||
| 	"claude-3-5-sonnet-latest":   3.0 / 1000 * USD, | ||||
| 	"claude-3-opus-20240229":     15.0 / 1000 * USD, | ||||
| 	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7 | ||||
| 	"ERNIE-4.0-8K":       0.120 * RMB, | ||||
| @@ -112,15 +115,24 @@ var ModelRatio = map[string]float64{ | ||||
| 	"bge-large-en":       0.002 * RMB, | ||||
| 	"tao-8k":             0.002 * RMB, | ||||
| 	// https://ai.google.dev/pricing | ||||
| 	"gemini-pro":                          1, // $0.00025 / 1k characters -> $0.001 / 1k tokens | ||||
| 	"gemini-1.0-pro":                      1, | ||||
| 	"gemini-1.5-pro":                      1, | ||||
| 	"gemini-1.5-pro-001":                  1, | ||||
| 	"gemini-1.5-flash":                    1, | ||||
| 	"gemini-1.5-flash-001":                1, | ||||
| 	"gemini-2.0-flash-exp":                1, | ||||
| 	"gemini-2.0-flash-thinking-exp":       1, | ||||
| 	"gemini-2.0-flash-thinking-exp-01-21": 1, | ||||
| 	// https://cloud.google.com/vertex-ai/generative-ai/pricing | ||||
| 	// "gemma-2-2b-it":                       0, | ||||
| 	// "gemma-2-9b-it":                       0, | ||||
| 	// "gemma-2-27b-it":                      0, | ||||
| 	"gemini-pro":                          0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens | ||||
| 	"gemini-1.0-pro":                      0.125 * MILLI_USD, | ||||
| 	"gemini-1.5-pro":                      1.25 * MILLI_USD, | ||||
| 	"gemini-1.5-pro-001":                  1.25 * MILLI_USD, | ||||
| 	"gemini-1.5-pro-experimental":         1.25 * MILLI_USD, | ||||
| 	"gemini-1.5-flash":                    0.075 * MILLI_USD, | ||||
| 	"gemini-1.5-flash-001":                0.075 * MILLI_USD, | ||||
| 	"gemini-1.5-flash-8b":                 0.0375 * MILLI_USD, | ||||
| 	"gemini-2.0-flash-exp":                0.075 * MILLI_USD, | ||||
| 	"gemini-2.0-flash":                    0.15 * MILLI_USD, | ||||
| 	"gemini-2.0-flash-001":                0.15 * MILLI_USD, | ||||
| 	"gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD, | ||||
| 	"gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD, | ||||
| 	"gemini-2.0-pro-exp-02-05":            1.25 * MILLI_USD, | ||||
| 	"aqa":                                 1, | ||||
| 	// https://open.bigmodel.cn/pricing | ||||
| 	"glm-zero-preview": 0.01 * RMB, | ||||
| @@ -417,11 +429,15 @@ func ModelRatio2JSONString() string { | ||||
| } | ||||
|  | ||||
| func UpdateModelRatioByJSONString(jsonStr string) error { | ||||
| 	modelRatioLock.Lock() | ||||
| 	defer modelRatioLock.Unlock() | ||||
| 	ModelRatio = make(map[string]float64) | ||||
| 	return json.Unmarshal([]byte(jsonStr), &ModelRatio) | ||||
| } | ||||
|  | ||||
| func GetModelRatio(name string, channelType int) float64 { | ||||
| 	modelRatioLock.RLock() | ||||
| 	defer modelRatioLock.RUnlock() | ||||
| 	if strings.HasPrefix(name, "qwen-") && strings.HasSuffix(name, "-internet") { | ||||
| 		name = strings.TrimSuffix(name, "-internet") | ||||
| 	} | ||||
|   | ||||
| @@ -48,5 +48,10 @@ const ( | ||||
| 	SiliconFlow | ||||
| 	XAI | ||||
| 	Replicate | ||||
| 	BaiduV2 | ||||
| 	XunfeiV2 | ||||
| 	AliBailian | ||||
| 	OpenAICompatible | ||||
| 	GeminiOpenAICompatible | ||||
| 	Dummy | ||||
| ) | ||||
|   | ||||
| @@ -48,6 +48,12 @@ var ChannelBaseURLs = []string{ | ||||
| 	"https://api.siliconflow.cn",                // 44 | ||||
| 	"https://api.x.ai",                          // 45 | ||||
| 	"https://api.replicate.com/v1/models/",      // 46 | ||||
| 	"https://qianfan.baidubce.com",              // 47 | ||||
| 	"https://spark-api-open.xf-yun.com",         // 48 | ||||
| 	"https://dashscope.aliyuncs.com",            // 49 | ||||
| 	"",                                          // 50 | ||||
|  | ||||
| 	"https://generativelanguage.googleapis.com/v1beta/openai/", // 51 | ||||
| } | ||||
|  | ||||
| func init() { | ||||
|   | ||||
| @@ -38,7 +38,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode { | ||||
| 	textRequest.Model, _ = getMappedModelName(textRequest.Model, meta.ModelMapping) | ||||
| 	meta.ActualModelName = textRequest.Model | ||||
| 	// set system prompt if not empty | ||||
| 	systemPromptReset := setSystemPrompt(ctx, textRequest, meta.SystemPrompt) | ||||
| 	systemPromptReset := setSystemPrompt(ctx, textRequest, meta.ForcedSystemPrompt) | ||||
| 	// get model ratio & group ratio | ||||
| 	modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType) | ||||
| 	groupRatio := billingratio.GetGroupRatio(meta.Group) | ||||
| @@ -88,7 +88,11 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode { | ||||
| } | ||||
|  | ||||
| func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *model.GeneralOpenAIRequest, adaptor adaptor.Adaptor) (io.Reader, error) { | ||||
| 	if !config.EnforceIncludeUsage && meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan { | ||||
| 	if !config.EnforceIncludeUsage && | ||||
| 		meta.APIType == apitype.OpenAI && | ||||
| 		meta.OriginModelName == meta.ActualModelName && | ||||
| 		meta.ChannelType != channeltype.Baichuan && | ||||
| 		meta.ForcedSystemPrompt == "" { | ||||
| 		// no need to convert request for openai | ||||
| 		return c.Request.Body, nil | ||||
| 	} | ||||
|   | ||||
| @@ -30,29 +30,29 @@ type Meta struct { | ||||
| 	// OriginModelName is the model name from the raw user request | ||||
| 	OriginModelName string | ||||
| 	// ActualModelName is the model name after mapping | ||||
| 	ActualModelName string | ||||
| 	RequestURLPath  string | ||||
| 	PromptTokens    int // only for DoResponse | ||||
| 	SystemPrompt    string | ||||
| 	StartTime       time.Time | ||||
| 	ActualModelName    string | ||||
| 	RequestURLPath     string | ||||
| 	PromptTokens       int // only for DoResponse | ||||
| 	ForcedSystemPrompt string | ||||
| 	StartTime          time.Time | ||||
| } | ||||
|  | ||||
| func GetByContext(c *gin.Context) *Meta { | ||||
| 	meta := Meta{ | ||||
| 		Mode:            relaymode.GetByPath(c.Request.URL.Path), | ||||
| 		ChannelType:     c.GetInt(ctxkey.Channel), | ||||
| 		ChannelId:       c.GetInt(ctxkey.ChannelId), | ||||
| 		TokenId:         c.GetInt(ctxkey.TokenId), | ||||
| 		TokenName:       c.GetString(ctxkey.TokenName), | ||||
| 		UserId:          c.GetInt(ctxkey.Id), | ||||
| 		Group:           c.GetString(ctxkey.Group), | ||||
| 		ModelMapping:    c.GetStringMapString(ctxkey.ModelMapping), | ||||
| 		OriginModelName: c.GetString(ctxkey.RequestModel), | ||||
| 		BaseURL:         c.GetString(ctxkey.BaseURL), | ||||
| 		APIKey:          strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "), | ||||
| 		RequestURLPath:  c.Request.URL.String(), | ||||
| 		SystemPrompt:    c.GetString(ctxkey.SystemPrompt), | ||||
| 		StartTime:       time.Now(), | ||||
| 		Mode:               relaymode.GetByPath(c.Request.URL.Path), | ||||
| 		ChannelType:        c.GetInt(ctxkey.Channel), | ||||
| 		ChannelId:          c.GetInt(ctxkey.ChannelId), | ||||
| 		TokenId:            c.GetInt(ctxkey.TokenId), | ||||
| 		TokenName:          c.GetString(ctxkey.TokenName), | ||||
| 		UserId:             c.GetInt(ctxkey.Id), | ||||
| 		Group:              c.GetString(ctxkey.Group), | ||||
| 		ModelMapping:       c.GetStringMapString(ctxkey.ModelMapping), | ||||
| 		OriginModelName:    c.GetString(ctxkey.RequestModel), | ||||
| 		BaseURL:            c.GetString(ctxkey.BaseURL), | ||||
| 		APIKey:             strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "), | ||||
| 		RequestURLPath:     c.Request.URL.String(), | ||||
| 		ForcedSystemPrompt: c.GetString(ctxkey.SystemPrompt), | ||||
| 		StartTime:          time.Now(), | ||||
| 	} | ||||
| 	cfg, ok := c.Get(ctxkey.Config) | ||||
| 	if ok { | ||||
|   | ||||
| @@ -26,6 +26,7 @@ type GeneralOpenAIRequest struct { | ||||
| 	Messages            []Message       `json:"messages,omitempty"` | ||||
| 	Model               string          `json:"model,omitempty"` | ||||
| 	Store               *bool           `json:"store,omitempty"` | ||||
| 	ReasoningEffort     *string         `json:"reasoning_effort,omitempty"` | ||||
| 	Metadata            any             `json:"metadata,omitempty"` | ||||
| 	FrequencyPenalty    *float64        `json:"frequency_penalty,omitempty"` | ||||
| 	LogitBias           any             `json:"logit_bias,omitempty"` | ||||
|   | ||||
| @@ -1,11 +1,12 @@ | ||||
| package model | ||||
|  | ||||
| type Message struct { | ||||
| 	Role       string  `json:"role,omitempty"` | ||||
| 	Content    any     `json:"content,omitempty"` | ||||
| 	Name       *string `json:"name,omitempty"` | ||||
| 	ToolCalls  []Tool  `json:"tool_calls,omitempty"` | ||||
| 	ToolCallId string  `json:"tool_call_id,omitempty"` | ||||
| 	Role             string  `json:"role,omitempty"` | ||||
| 	Content          any     `json:"content,omitempty"` | ||||
| 	ReasoningContent any     `json:"reasoning_content,omitempty"` | ||||
| 	Name             *string `json:"name,omitempty"` | ||||
| 	ToolCalls        []Tool  `json:"tool_calls,omitempty"` | ||||
| 	ToolCallId       string  `json:"tool_call_id,omitempty"` | ||||
| } | ||||
|  | ||||
| func (m Message) IsStringContent() bool { | ||||
|   | ||||
| @@ -4,6 +4,14 @@ type Usage struct { | ||||
| 	PromptTokens     int `json:"prompt_tokens"` | ||||
| 	CompletionTokens int `json:"completion_tokens"` | ||||
| 	TotalTokens      int `json:"total_tokens"` | ||||
|  | ||||
| 	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"` | ||||
| } | ||||
|  | ||||
| type CompletionTokensDetails struct { | ||||
| 	ReasoningTokens          int `json:"reasoning_tokens"` | ||||
| 	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"` | ||||
| 	RejectedPredictionTokens int `json:"rejected_prediction_tokens"` | ||||
| } | ||||
|  | ||||
| type Error struct { | ||||
|   | ||||
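
The extended `Usage` struct now captures OpenAI's `completion_tokens_details` block (reasoning and predicted-token counts). A quick decoding sketch; the JSON payload is an illustrative example, not a recorded response:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors relay/model.Usage after this change.
type CompletionTokensDetails struct {
	ReasoningTokens          int `json:"reasoning_tokens"`
	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
	RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
}

type Usage struct {
	PromptTokens            int                      `json:"prompt_tokens"`
	CompletionTokens        int                      `json:"completion_tokens"`
	TotalTokens             int                      `json:"total_tokens"`
	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"`
}

func main() {
	payload := []byte(`{
	  "prompt_tokens": 11,
	  "completion_tokens": 245,
	  "total_tokens": 256,
	  "completion_tokens_details": {"reasoning_tokens": 128, "accepted_prediction_tokens": 0, "rejected_prediction_tokens": 0}
	}`)
	var u Usage
	if err := json.Unmarshal(payload, &u); err != nil {
		panic(err)
	}
	fmt.Println(u.CompletionTokens, u.CompletionTokensDetails.ReasoningTokens)
}
```
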
| @@ -7,7 +7,7 @@ export const CHANNEL_OPTIONS = [ | ||||
|   { key: 24, text: 'Google Gemini', value: 24, color: 'orange' }, | ||||
|   { key: 28, text: 'Mistral AI', value: 28, color: 'orange' }, | ||||
|   { key: 41, text: 'Novita', value: 41, color: 'purple' }, | ||||
|   { key: 40, text: '字节跳动豆包', value: 40, color: 'blue' }, | ||||
|   {key: 40, text: '字节火山引擎', value: 40, color: 'blue'}, | ||||
|   { key: 15, text: '百度文心千帆', value: 15, color: 'blue' }, | ||||
|   { key: 17, text: '阿里通义千问', value: 17, color: 'orange' }, | ||||
|   { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' }, | ||||
| @@ -35,7 +35,7 @@ export const CHANNEL_OPTIONS = [ | ||||
|   { key: 8, text: '自定义渠道', value: 8, color: 'pink' }, | ||||
|   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' }, | ||||
|   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' }, | ||||
|   { key: 20, text: '代理:OpenRouter', value: 20, color: 'black' }, | ||||
|   {key: 20, text: 'OpenRouter', value: 20, color: 'black'}, | ||||
|   { key: 2, text: '代理:API2D', value: 2, color: 'blue' }, | ||||
|   { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' }, | ||||
|   { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' }, | ||||
|   | ||||
| @@ -49,7 +49,7 @@ export const CHANNEL_OPTIONS = { | ||||
|   }, | ||||
|   40: { | ||||
|     key: 40, | ||||
|     text: '字节跳动豆包', | ||||
|     text: '字节火山引擎', | ||||
|     value: 40, | ||||
|     color: 'primary' | ||||
|   }, | ||||
| @@ -217,7 +217,7 @@ export const CHANNEL_OPTIONS = { | ||||
|   }, | ||||
|   20: { | ||||
|     key: 20, | ||||
|     text: '代理:OpenRouter', | ||||
|       text: 'OpenRouter', | ||||
|     value: 20, | ||||
|     color: 'success' | ||||
|   }, | ||||
|   | ||||
| @@ -1,17 +1,7 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { useTranslation } from 'react-i18next'; | ||||
| import { | ||||
|   Button, | ||||
|   Dropdown, | ||||
|   Form, | ||||
|   Input, | ||||
|   Label, | ||||
|   Message, | ||||
|   Pagination, | ||||
|   Popup, | ||||
|   Table, | ||||
| } from 'semantic-ui-react'; | ||||
| import { Link } from 'react-router-dom'; | ||||
| import React, {useEffect, useState} from 'react'; | ||||
| import {useTranslation} from 'react-i18next'; | ||||
| import {Button, Dropdown, Form, Input, Label, Message, Pagination, Popup, Table,} from 'semantic-ui-react'; | ||||
| import {Link} from 'react-router-dom'; | ||||
| import { | ||||
|   API, | ||||
|   loadChannelModels, | ||||
| @@ -23,8 +13,8 @@ import { | ||||
|   timestamp2string, | ||||
| } from '../helpers'; | ||||
|  | ||||
| import { CHANNEL_OPTIONS, ITEMS_PER_PAGE } from '../constants'; | ||||
| import { renderGroup, renderNumber } from '../helpers/render'; | ||||
| import {CHANNEL_OPTIONS, ITEMS_PER_PAGE} from '../constants'; | ||||
| import {renderGroup, renderNumber} from '../helpers/render'; | ||||
|  | ||||
| function renderTimestamp(timestamp) { | ||||
|   return <>{timestamp2string(timestamp)}</>; | ||||
| @@ -54,6 +44,9 @@ function renderType(type, t) { | ||||
| function renderBalance(type, balance, t) { | ||||
|   switch (type) { | ||||
|     case 1: // OpenAI | ||||
|         if (balance === 0) { | ||||
|             return <span>{t('channel.table.balance_not_supported')}</span>; | ||||
|         } | ||||
|       return <span>${balance.toFixed(2)}</span>; | ||||
|     case 4: // CloseAI | ||||
|       return <span>¥{balance.toFixed(2)}</span>; | ||||
| @@ -67,6 +60,8 @@ function renderBalance(type, balance, t) { | ||||
|       return <span>¥{balance.toFixed(2)}</span>; | ||||
|     case 13: // AIGC2D | ||||
|       return <span>{renderNumber(balance)}</span>; | ||||
|     case 20: // OpenRouter | ||||
|       return <span>${balance.toFixed(2)}</span>; | ||||
|     case 36: // DeepSeek | ||||
|       return <span>¥{balance.toFixed(2)}</span>; | ||||
|     case 44: // SiliconFlow | ||||
| @@ -93,30 +88,32 @@ const ChannelsTable = () => { | ||||
|   const [showPrompt, setShowPrompt] = useState(shouldShowPrompt(promptID)); | ||||
|   const [showDetail, setShowDetail] = useState(isShowDetail()); | ||||
|  | ||||
|   const processChannelData = (channel) => { | ||||
|     if (channel.models === '') { | ||||
|       channel.models = []; | ||||
|       channel.test_model = ''; | ||||
|     } else { | ||||
|       channel.models = channel.models.split(','); | ||||
|       if (channel.models.length > 0) { | ||||
|         channel.test_model = channel.models[0]; | ||||
|       } | ||||
|       channel.model_options = channel.models.map((model) => { | ||||
|         return { | ||||
|           key: model, | ||||
|           text: model, | ||||
|           value: model, | ||||
|         }; | ||||
|       }); | ||||
|       console.log('channel', channel); | ||||
|     } | ||||
|     return channel; | ||||
|   }; | ||||
|  | ||||
|   const loadChannels = async (startIdx) => { | ||||
|     const res = await API.get(`/api/channel/?p=${startIdx}`); | ||||
|     const { success, message, data } = res.data; | ||||
|     if (success) { | ||||
|       let localChannels = data.map((channel) => { | ||||
|         if (channel.models === '') { | ||||
|           channel.models = []; | ||||
|           channel.test_model = ''; | ||||
|         } else { | ||||
|           channel.models = channel.models.split(','); | ||||
|           if (channel.models.length > 0) { | ||||
|             channel.test_model = channel.models[0]; | ||||
|           } | ||||
|           channel.model_options = channel.models.map((model) => { | ||||
|             return { | ||||
|               key: model, | ||||
|               text: model, | ||||
|               value: model, | ||||
|             }; | ||||
|           }); | ||||
|           console.log('channel', channel); | ||||
|         } | ||||
|         return channel; | ||||
|       }); | ||||
|       let localChannels = data.map(processChannelData); | ||||
|       if (startIdx === 0) { | ||||
|         setChannels(localChannels); | ||||
|       } else { | ||||
| @@ -301,7 +298,8 @@ const ChannelsTable = () => { | ||||
|     const res = await API.get(`/api/channel/search?keyword=${searchKeyword}`); | ||||
|     const { success, message, data } = res.data; | ||||
|     if (success) { | ||||
|       setChannels(data); | ||||
|       let localChannels = data.map(processChannelData); | ||||
|       setChannels(localChannels); | ||||
|       setActivePage(1); | ||||
|     } else { | ||||
|       showError(message); | ||||
| @@ -495,7 +493,6 @@ const ChannelsTable = () => { | ||||
|               onClick={() => { | ||||
|                 sortChannel('balance'); | ||||
|               }} | ||||
|               hidden={!showDetail} | ||||
|             > | ||||
|               {t('channel.table.balance')} | ||||
|             </Table.HeaderCell> | ||||
| @@ -504,6 +501,7 @@ const ChannelsTable = () => { | ||||
|               onClick={() => { | ||||
|                 sortChannel('priority'); | ||||
|               }} | ||||
|               hidden={!showDetail} | ||||
|             > | ||||
|               {t('channel.table.priority')} | ||||
|             </Table.HeaderCell> | ||||
| @@ -543,7 +541,7 @@ const ChannelsTable = () => { | ||||
|                       basic | ||||
|                     /> | ||||
|                   </Table.Cell> | ||||
|                   <Table.Cell hidden={!showDetail}> | ||||
|                   <Table.Cell> | ||||
|                     <Popup | ||||
|                       trigger={ | ||||
|                         <span | ||||
| @@ -559,7 +557,7 @@ const ChannelsTable = () => { | ||||
|                       basic | ||||
|                     /> | ||||
|                   </Table.Cell> | ||||
|                   <Table.Cell> | ||||
|                   <Table.Cell hidden={!showDetail}> | ||||
|                     <Popup | ||||
|                       trigger={ | ||||
|                         <Input | ||||
| @@ -593,7 +591,15 @@ const ChannelsTable = () => { | ||||
|                     /> | ||||
|                   </Table.Cell> | ||||
|                   <Table.Cell> | ||||
|                     <div> | ||||
|                     <div | ||||
|                       style={{ | ||||
|                         display: 'flex', | ||||
|                         alignItems: 'center', | ||||
|                         flexWrap: 'wrap', | ||||
|                         gap: '2px', | ||||
|                         rowGap: '6px', | ||||
|                       }} | ||||
|                     > | ||||
|                       <Button | ||||
|                         size={'tiny'} | ||||
|                         positive | ||||
|   | ||||
| @@ -1,48 +1,108 @@ | ||||
| export const CHANNEL_OPTIONS = [ | ||||
|     { key: 1, text: 'OpenAI', value: 1, color: 'green' }, | ||||
|     { key: 14, text: 'Anthropic Claude', value: 14, color: 'black' }, | ||||
|     { key: 33, text: 'AWS', value: 33, color: 'black' }, | ||||
|     { key: 3, text: 'Azure OpenAI', value: 3, color: 'olive' }, | ||||
|     { key: 11, text: 'Google PaLM2', value: 11, color: 'orange' }, | ||||
|     { key: 24, text: 'Google Gemini', value: 24, color: 'orange' }, | ||||
|     { key: 28, text: 'Mistral AI', value: 28, color: 'orange' }, | ||||
|     { key: 41, text: 'Novita', value: 41, color: 'purple' }, | ||||
|     { key: 40, text: '字节跳动豆包', value: 40, color: 'blue' }, | ||||
|     { key: 15, text: '百度文心千帆', value: 15, color: 'blue' }, | ||||
|     { key: 17, text: '阿里通义千问', value: 17, color: 'orange' }, | ||||
|     { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' }, | ||||
|     { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' }, | ||||
|     { key: 19, text: '360 智脑', value: 19, color: 'blue' }, | ||||
|     { key: 25, text: 'Moonshot AI', value: 25, color: 'black' }, | ||||
|     { key: 23, text: '腾讯混元', value: 23, color: 'teal' }, | ||||
|     { key: 26, text: '百川大模型', value: 26, color: 'orange' }, | ||||
|     { key: 27, text: 'MiniMax', value: 27, color: 'red' }, | ||||
|     { key: 29, text: 'Groq', value: 29, color: 'orange' }, | ||||
|     { key: 30, text: 'Ollama', value: 30, color: 'black' }, | ||||
|     { key: 31, text: '零一万物', value: 31, color: 'green' }, | ||||
|     { key: 32, text: '阶跃星辰', value: 32, color: 'blue' }, | ||||
|     { key: 34, text: 'Coze', value: 34, color: 'blue' }, | ||||
|     { key: 35, text: 'Cohere', value: 35, color: 'blue' }, | ||||
|     { key: 36, text: 'DeepSeek', value: 36, color: 'black' }, | ||||
|     { key: 37, text: 'Cloudflare', value: 37, color: 'orange' }, | ||||
|     { key: 38, text: 'DeepL', value: 38, color: 'black' }, | ||||
|     { key: 39, text: 'together.ai', value: 39, color: 'blue' }, | ||||
|     { key: 42, text: 'VertexAI', value: 42, color: 'blue' }, | ||||
|     { key: 43, text: 'Proxy', value: 43, color: 'blue' }, | ||||
|     { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' }, | ||||
|     { key: 45, text: 'xAI', value: 45, color: 'blue' }, | ||||
|     { key: 46, text: 'Replicate', value: 46, color: 'blue' }, | ||||
|     { key: 8, text: '自定义渠道', value: 8, color: 'pink' }, | ||||
|     { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' }, | ||||
|     { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' }, | ||||
|     { key: 20, text: '代理:OpenRouter', value: 20, color: 'black' }, | ||||
|     { key: 2, text: '代理:API2D', value: 2, color: 'blue' }, | ||||
|     { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' }, | ||||
|     { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' }, | ||||
|     { key: 10, text: '代理:AI Proxy', value: 10, color: 'purple' }, | ||||
|     { key: 4, text: '代理:CloseAI', value: 4, color: 'teal' }, | ||||
|     { key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet' }, | ||||
|     { key: 9, text: '代理:AI.LS', value: 9, color: 'yellow' }, | ||||
|     { key: 12, text: '代理:API2GPT', value: 12, color: 'blue' }, | ||||
|     { key: 13, text: '代理:AIGC2D', value: 13, color: 'purple' } | ||||
|   { key: 1, text: 'OpenAI', value: 1, color: 'green' }, | ||||
|   { | ||||
|     key: 50, | ||||
|     text: 'OpenAI 兼容', | ||||
|     value: 50, | ||||
|     color: 'olive', | ||||
|     description: 'OpenAI 兼容渠道,支持设置 Base URL', | ||||
|   }, | ||||
|   { key: 14, text: 'Anthropic', value: 14, color: 'black' }, | ||||
|   { key: 33, text: 'AWS', value: 33, color: 'black' }, | ||||
|   { key: 3, text: 'Azure', value: 3, color: 'olive' }, | ||||
|   { key: 11, text: 'PaLM2', value: 11, color: 'orange' }, | ||||
|   { key: 24, text: 'Gemini', value: 24, color: 'orange' }, | ||||
|   { | ||||
|     key: 51, | ||||
|     text: 'Gemini (OpenAI)', | ||||
|     value: 51, | ||||
|     color: 'orange', | ||||
|     description: 'Gemini OpenAI 兼容格式', | ||||
|   }, | ||||
|   { key: 28, text: 'Mistral AI', value: 28, color: 'orange' }, | ||||
|   { key: 41, text: 'Novita', value: 41, color: 'purple' }, | ||||
|   { | ||||
|     key: 40, | ||||
|     text: '字节火山引擎', | ||||
|     value: 40, | ||||
|     color: 'blue', | ||||
|     description: '原字节跳动豆包', | ||||
|   }, | ||||
|   { | ||||
|     key: 15, | ||||
|     text: '百度文心千帆', | ||||
|     value: 15, | ||||
|     color: 'blue', | ||||
|     tip: '请前往<a href="https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application/v1" target="_blank">此处</a>获取 AK(API Key)以及 SK(Secret Key),注意,V2 版本接口请使用 <strong>百度文心千帆 V2 </strong>渠道类型', | ||||
|   }, | ||||
|   { | ||||
|     key: 47, | ||||
|     text: '百度文心千帆 V2', | ||||
|     value: 47, | ||||
|     color: 'blue', | ||||
|     tip: '请前往<a href="https://console.bce.baidu.com/iam/#/iam/apikey/list" target="_blank">此处</a>获取 API Key,注意本渠道仅支持<a target="_blank" href="https://cloud.baidu.com/doc/WENXINWORKSHOP/s/em4tsqo3v">推理服务 V2</a>相关模型', | ||||
|   }, | ||||
|   { | ||||
|     key: 17, | ||||
|     text: '阿里通义千问', | ||||
|     value: 17, | ||||
|     color: 'orange', | ||||
|     tip: '如需使用阿里云百炼,请使用<strong>阿里云百炼</strong>渠道', | ||||
|   }, | ||||
|   { key: 49, text: '阿里云百炼', value: 49, color: 'orange' }, | ||||
|   { | ||||
|     key: 18, | ||||
|     text: '讯飞星火认知', | ||||
|     value: 18, | ||||
|     color: 'blue', | ||||
|     tip: '本渠道基于讯飞 WebSocket 版本 API,如需 HTTP 版本,请使用<strong>讯飞星火认知 V2</strong>渠道', | ||||
|   }, | ||||
|   { | ||||
|     key: 48, | ||||
|     text: '讯飞星火认知 V2', | ||||
|     value: 48, | ||||
|     color: 'blue', | ||||
|     tip: 'HTTP 版本的讯飞接口,前往<a href="https://console.xfyun.cn/services/cbm" target="_blank">此处</a>获取 HTTP 服务接口认证密钥', | ||||
|   }, | ||||
|   { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' }, | ||||
|   { key: 19, text: '360 智脑', value: 19, color: 'blue' }, | ||||
|   { key: 25, text: 'Moonshot AI', value: 25, color: 'black' }, | ||||
|   { key: 23, text: '腾讯混元', value: 23, color: 'teal' }, | ||||
|   { key: 26, text: '百川大模型', value: 26, color: 'orange' }, | ||||
|   { key: 27, text: 'MiniMax', value: 27, color: 'red' }, | ||||
|   { key: 29, text: 'Groq', value: 29, color: 'orange' }, | ||||
|   { key: 30, text: 'Ollama', value: 30, color: 'black' }, | ||||
|   { key: 31, text: '零一万物', value: 31, color: 'green' }, | ||||
|   { key: 32, text: '阶跃星辰', value: 32, color: 'blue' }, | ||||
|   { key: 34, text: 'Coze', value: 34, color: 'blue' }, | ||||
|   { key: 35, text: 'Cohere', value: 35, color: 'blue' }, | ||||
|   { key: 36, text: 'DeepSeek', value: 36, color: 'black' }, | ||||
|   { key: 37, text: 'Cloudflare', value: 37, color: 'orange' }, | ||||
|   { key: 38, text: 'DeepL', value: 38, color: 'black' }, | ||||
|   { key: 39, text: 'together.ai', value: 39, color: 'blue' }, | ||||
|   { key: 42, text: 'VertexAI', value: 42, color: 'blue' }, | ||||
|   { key: 43, text: 'Proxy', value: 43, color: 'blue' }, | ||||
|   { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' }, | ||||
|   { key: 45, text: 'xAI', value: 45, color: 'blue' }, | ||||
|   { key: 46, text: 'Replicate', value: 46, color: 'blue' }, | ||||
|   { | ||||
|     key: 8, | ||||
|     text: '自定义渠道', | ||||
|     value: 8, | ||||
|     color: 'pink', | ||||
|     tip: '不推荐使用,请使用 <strong>OpenAI 兼容</strong>渠道类型。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型', | ||||
|     description: '不推荐使用,请使用 OpenAI 兼容渠道类型', | ||||
|   }, | ||||
|   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' }, | ||||
|   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' }, | ||||
|   { key: 20, text: 'OpenRouter', value: 20, color: 'black' }, | ||||
|   { key: 2, text: '代理:API2D', value: 2, color: 'blue' }, | ||||
|   { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' }, | ||||
|   { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' }, | ||||
|   { key: 10, text: '代理:AI Proxy', value: 10, color: 'purple' }, | ||||
|   { key: 4, text: '代理:CloseAI', value: 4, color: 'teal' }, | ||||
|   { key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet' }, | ||||
|   { key: 9, text: '代理:AI.LS', value: 9, color: 'yellow' }, | ||||
|   { key: 12, text: '代理:API2GPT', value: 12, color: 'blue' }, | ||||
|   { key: 13, text: '代理:AIGC2D', value: 13, color: 'purple' }, | ||||
| ]; | ||||
|   | ||||
web/default/src/helpers/helper.js (new file, 13 lines)
| @@ -0,0 +1,13 @@ | ||||
| import {CHANNEL_OPTIONS} from '../constants'; | ||||
|  | ||||
| let channelMap = undefined; | ||||
|  | ||||
| export function getChannelOption(channelId) { | ||||
|     if (channelMap === undefined) { | ||||
|         channelMap = {}; | ||||
|         CHANNEL_OPTIONS.forEach((option) => { | ||||
|             channelMap[option.key] = option; | ||||
|         }); | ||||
|     } | ||||
|     return channelMap[channelId]; | ||||
| } | ||||
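The new helper lazily builds a lookup map keyed by channel type id the first time it is called, so later calls (for example from renderChannelTip in the next hunk) reuse the cached map instead of rescanning CHANNEL_OPTIONS. A minimal usage sketch; the import path and the id 15 are illustrative only:

import { getChannelOption } from './helpers/helper';

// 15 is 百度文心千帆 in CHANNEL_OPTIONS; any key from that array works the same way.
const option = getChannelOption(15);
if (option) {
  console.log(option.text, option.color); // "百度文心千帆" "blue"
  console.log(Boolean(option.tip));       // true: this entry carries an HTML tip string
}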
| @@ -1,5 +1,6 @@ | ||||
| import { Label } from 'semantic-ui-react'; | ||||
| import { useTranslation } from 'react-i18next'; | ||||
| import { Label, Message } from 'semantic-ui-react'; | ||||
| import { getChannelOption } from './helper'; | ||||
| import React from 'react'; | ||||
|  | ||||
| export function renderText(text, limit) { | ||||
|   if (text.length > limit) { | ||||
| @@ -15,7 +16,15 @@ export function renderGroup(group) { | ||||
|   let groups = group.split(','); | ||||
|   groups.sort(); | ||||
|   return ( | ||||
|     <> | ||||
|     <div | ||||
|       style={{ | ||||
|         display: 'flex', | ||||
|         alignItems: 'center', | ||||
|         flexWrap: 'wrap', | ||||
|         gap: '2px', | ||||
|         rowGap: '6px', | ||||
|       }} | ||||
|     > | ||||
|       {groups.map((group) => { | ||||
|         if (group === 'vip' || group === 'pro') { | ||||
|           return <Label color='yellow'>{group}</Label>; | ||||
| @@ -24,7 +33,7 @@ export function renderGroup(group) { | ||||
|         } | ||||
|         return <Label>{group}</Label>; | ||||
|       })} | ||||
|     </> | ||||
|     </div> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| @@ -98,3 +107,15 @@ export function renderColorLabel(text) { | ||||
|     </Label> | ||||
|   ); | ||||
| } | ||||
|  | ||||
| export function renderChannelTip(channelId) { | ||||
|   let channel = getChannelOption(channelId); | ||||
|   if (channel === undefined || channel.tip === undefined) { | ||||
|     return <></>; | ||||
|   } | ||||
|   return ( | ||||
|     <Message> | ||||
|       <div dangerouslySetInnerHTML={{ __html: channel.tip }}></div> | ||||
|     </Message> | ||||
|   ); | ||||
| } | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| import { toast } from 'react-toastify'; | ||||
| import { toastConstants } from '../constants'; | ||||
| import {toast} from 'react-toastify'; | ||||
| import {toastConstants} from '../constants'; | ||||
| import React from 'react'; | ||||
| import { API } from './api'; | ||||
| import {API} from './api'; | ||||
|  | ||||
| const HTMLToastContent = ({ htmlContent }) => { | ||||
|   return <div dangerouslySetInnerHTML={{ __html: htmlContent }} />; | ||||
| @@ -74,6 +74,7 @@ if (isMobile()) { | ||||
| } | ||||
|  | ||||
| export function showError(error) { | ||||
|   if (!error) return; | ||||
|   console.error(error); | ||||
|   if (error.message) { | ||||
|     if (error.name === 'AxiosError') { | ||||
| @@ -158,17 +159,7 @@ export function timestamp2string(timestamp) { | ||||
|     second = '0' + second; | ||||
|   } | ||||
|   return ( | ||||
|     year + | ||||
|     '-' + | ||||
|     month + | ||||
|     '-' + | ||||
|     day + | ||||
|     ' ' + | ||||
|     hour + | ||||
|     ':' + | ||||
|     minute + | ||||
|     ':' + | ||||
|     second | ||||
|       year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + second | ||||
|   ); | ||||
| } | ||||
|  | ||||
| @@ -193,7 +184,6 @@ export const verifyJSON = (str) => { | ||||
| export function shouldShowPrompt(id) { | ||||
|   let prompt = localStorage.getItem(`prompt-${id}`); | ||||
|   return !prompt; | ||||
|  | ||||
| } | ||||
|  | ||||
| export function setPromptShown(id) { | ||||
|   | ||||
| @@ -104,8 +104,10 @@ | ||||
|       "model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names", | ||||
|       "system_prompt": "System Prompt", | ||||
|       "system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model", | ||||
|       "base_url": "Proxy", | ||||
|       "base_url_placeholder": "Optional, used for API calls through proxy. Enter proxy address in format: https://domain.com", | ||||
|       "proxy_url": "Proxy", | ||||
|       "proxy_url_placeholder": "This is optional and used for API calls via a proxy. Please enter the proxy URL, formatted as: https://domain.com", | ||||
|       "base_url": "Base URL", | ||||
|       "base_url_placeholder": "The Base URL required by the OpenAPI SDK", | ||||
|       "key": "Key", | ||||
|       "key_placeholder": "Please enter key", | ||||
|       "batch": "Batch Create", | ||||
|   | ||||
| @@ -104,8 +104,10 @@ | ||||
|       "model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称", | ||||
|       "system_prompt": "系统提示词", | ||||
|       "system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型", | ||||
|       "base_url": "代理", | ||||
|       "base_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com", | ||||
|       "proxy_url": "代理", | ||||
|       "proxy_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型", | ||||
|       "base_url": "Base URL", | ||||
|       "base_url_placeholder": "OpenAPI SDK 中所要求的 Base URL", | ||||
|       "key": "密钥", | ||||
|       "key_placeholder": "请输入密钥", | ||||
|       "batch": "批量创建", | ||||
|   | ||||
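To make the split between the renamed locale keys concrete: the proxy field only swaps the origin of the upstream request, while the Base URL of the new OpenAI-compatible channel is used verbatim, the way an OpenAI SDK base URL is. A rough sketch of the difference; the helper names and endpoint paths are illustrative, not taken from the repository:

// Hypothetical illustration only, not code from the repository.
// Legacy proxy setting: only the scheme and host are replaced, the adaptor still
// appends its full fixed path (here /v1/chat/completions).
function withProxy(proxyUrl) {
  return new URL('/v1/chat/completions', proxyUrl).toString();
}

// OpenAI-compatible channel (type 50): the Base URL already includes the /v1 prefix,
// and only the endpoint path is appended, as an OpenAI SDK would do.
function withBaseUrl(baseUrl) {
  return baseUrl.replace(/\/+$/, '') + '/chat/completions';
}

console.log(withProxy('https://gateway.example.com'));
// https://gateway.example.com/v1/chat/completions
console.log(withBaseUrl('https://gateway.example.com/openai/v1'));
// https://gateway.example.com/openai/v1/chat/completions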
| @@ -1,25 +1,10 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { useTranslation } from 'react-i18next'; | ||||
| import { | ||||
|   Button, | ||||
|   Form, | ||||
|   Header, | ||||
|   Input, | ||||
|   Message, | ||||
|   Segment, | ||||
|   Card, | ||||
| } from 'semantic-ui-react'; | ||||
| import { useNavigate, useParams } from 'react-router-dom'; | ||||
| import { | ||||
|   API, | ||||
|   copy, | ||||
|   getChannelModels, | ||||
|   showError, | ||||
|   showInfo, | ||||
|   showSuccess, | ||||
|   verifyJSON, | ||||
| } from '../../helpers'; | ||||
| import { CHANNEL_OPTIONS } from '../../constants'; | ||||
| import React, {useEffect, useState} from 'react'; | ||||
| import {useTranslation} from 'react-i18next'; | ||||
| import {Button, Card, Form, Input, Message} from 'semantic-ui-react'; | ||||
| import {useNavigate, useParams} from 'react-router-dom'; | ||||
| import {API, copy, getChannelModels, showError, showInfo, showSuccess, verifyJSON,} from '../../helpers'; | ||||
| import {CHANNEL_OPTIONS} from '../../constants'; | ||||
| import {renderChannelTip} from '../../helpers/render'; | ||||
|  | ||||
| const MODEL_MAPPING_EXAMPLE = { | ||||
|   'gpt-3.5-turbo-0301': 'gpt-3.5-turbo', | ||||
| @@ -310,6 +295,7 @@ const EditChannel = () => { | ||||
|                 options={groupOptions} | ||||
|               /> | ||||
|             </Form.Field> | ||||
|             {renderChannelTip(inputs.type)} | ||||
|  | ||||
|             {/* Azure OpenAI specific fields */} | ||||
|             {inputs.type === 3 && ( | ||||
| @@ -353,6 +339,20 @@ const EditChannel = () => { | ||||
|             {inputs.type === 8 && ( | ||||
|               <Form.Field> | ||||
|                 <Form.Input | ||||
|                     required | ||||
|                     label={t('channel.edit.proxy_url')} | ||||
|                     name='base_url' | ||||
|                     placeholder={t('channel.edit.proxy_url_placeholder')} | ||||
|                     onChange={handleInputChange} | ||||
|                     value={inputs.base_url} | ||||
|                     autoComplete='new-password' | ||||
|                 /> | ||||
|               </Form.Field> | ||||
|             )} | ||||
|             {inputs.type === 50 && ( | ||||
|                 <Form.Field> | ||||
|                   <Form.Input | ||||
|                       required | ||||
|                   label={t('channel.edit.base_url')} | ||||
|                   name='base_url' | ||||
|                   placeholder={t('channel.edit.base_url_placeholder')} | ||||
| @@ -651,12 +651,13 @@ const EditChannel = () => { | ||||
|             {inputs.type !== 3 && | ||||
|               inputs.type !== 33 && | ||||
|               inputs.type !== 8 && | ||||
|                 inputs.type !== 50 && | ||||
|               inputs.type !== 22 && ( | ||||
|                 <Form.Field> | ||||
|                   <Form.Input | ||||
|                     label={t('channel.edit.base_url')} | ||||
|                       label={t('channel.edit.proxy_url')} | ||||
|                     name='base_url' | ||||
|                     placeholder={t('channel.edit.base_url_placeholder')} | ||||
|                       placeholder={t('channel.edit.proxy_url_placeholder')} | ||||
|                     onChange={handleInputChange} | ||||
|                     value={inputs.base_url} | ||||
|                     autoComplete='new-password' | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { useTranslation } from 'react-i18next'; | ||||
| import { Card, Grid } from 'semantic-ui-react'; | ||||
| import React, {useEffect, useState} from 'react'; | ||||
| import {useTranslation} from 'react-i18next'; | ||||
| import {Card, Grid} from 'semantic-ui-react'; | ||||
| import { | ||||
|   Bar, | ||||
|   BarChart, | ||||
| @@ -122,11 +122,11 @@ const Dashboard = () => { | ||||
|         ? new Date(Math.min(...dates.map((d) => new Date(d)))) | ||||
|         : new Date(); | ||||
|  | ||||
|     // 确保至少显示5天的数据 | ||||
|     const fiveDaysAgo = new Date(); | ||||
|     fiveDaysAgo.setDate(fiveDaysAgo.getDate() - 4); // -4是因为包含今天 | ||||
|     if (minDate > fiveDaysAgo) { | ||||
|       minDate = fiveDaysAgo; | ||||
|     // 确保至少显示7天的数据 | ||||
|     const sevenDaysAgo = new Date(); | ||||
|     sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 6); // -6是因为包含今天 | ||||
|     if (minDate > sevenDaysAgo) { | ||||
|       minDate = sevenDaysAgo; | ||||
|     } | ||||
|  | ||||
|     // 生成所有日期 | ||||
| @@ -164,11 +164,11 @@ const Dashboard = () => { | ||||
|         ? new Date(Math.min(...dates.map((d) => new Date(d)))) | ||||
|         : new Date(); | ||||
|  | ||||
|     // 确保至少显示5天的数据 | ||||
|     const fiveDaysAgo = new Date(); | ||||
|     fiveDaysAgo.setDate(fiveDaysAgo.getDate() - 4); // -4是因为包含今天 | ||||
|     if (minDate > fiveDaysAgo) { | ||||
|       minDate = fiveDaysAgo; | ||||
|     // 确保至少显示7天的数据 | ||||
|     const sevenDaysAgo = new Date(); | ||||
|     sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 6); // -6是因为包含今天 | ||||
|     if (minDate > sevenDaysAgo) { | ||||
|       minDate = sevenDaysAgo; | ||||
|     } | ||||
|  | ||||
|     // 生成所有日期 | ||||
|   | ||||
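The "生成所有日期" step is elided from this hunk; under the assumption that it walks day by day from minDate to today, a minimal sketch of how the widened 7-day window would be filled looks like this (the function name and date format are illustrative):

// Sketch under assumptions; not the repository's implementation.
// Produce one 'YYYY-MM-DD' entry per day from minDate through today so the chart
// axis always covers at least the last 7 days, even when usage data is sparse.
function buildDateRange(minDate) {
  const dates = [];
  const cursor = new Date(minDate);
  const today = new Date();
  cursor.setHours(0, 0, 0, 0);
  today.setHours(0, 0, 0, 0);
  while (cursor <= today) {
    dates.push(cursor.toISOString().slice(0, 10)); // UTC date; a local formatter may be preferred
    cursor.setDate(cursor.getDate() + 1);
  }
  return dates;
}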