Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-04 15:53:42 +08:00)

Compare commits: v0.6.11-pr ... 391b969ecd

13 Commits (SHA1):

- 391b969ecd
- 8df4a2670b
- 7ac553541b
- a5c517c27a
- 3f421c4f04
- 1ce6a226f6
- cafd0a0327
- 8b8cd03e85
- 54c38de813
- d6284bf6b0
- df5d2ca93d
- fef7ae048b
- 3f06711501

@@ -72,7 +72,7 @@ _✨ Access all major models through the standard OpenAI API format, ready to use out of the box ✨_
   + [x] [Anthropic Claude series models](https://anthropic.com) (AWS Claude supported)
   + [x] [Google PaLM2/Gemini series models](https://developers.generativeai.google)
   + [x] [Mistral series models](https://mistral.ai/)
   + [x] [ByteDance Doubao models](https://console.volcengine.com/ark/region:ark+cn-beijing/model)
   + [x] [ByteDance Doubao models (Volcengine)](https://www.volcengine.com/experience/ark?utm_term=202502dsinvite&ac=DSASUQY5&rc=2QXCA1VI)
   + [x] [Baidu Wenxin Yiyan (ERNIE) series models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
   + [x] [Alibaba Tongyi Qianwen (Qwen) series models](https://help.aliyun.com/document_detail/2400395.html)
   + [x] [iFlytek Spark cognitive models](https://www.xfyun.cn/doc/spark/Web.html)

@@ -115,7 +115,7 @@ _✨ Access all major models through the standard OpenAI API format, ready to use out of the box ✨_
19. Rich **customization** options:
    1. Customizable system name, logo, and footer.
    2. Customizable home page and about page, built with HTML & Markdown or embedded as a standalone web page via an iframe.
20. Management APIs can be called with a system access token, so One API can be **extended and customized without modifying its code**; see the [API docs](./docs/API.md) for details..
20. Management APIs can be called with a system access token, so One API can be **extended and customized without modifying its code**; see the [API docs](./docs/API.md) for details.
21. Cloudflare Turnstile user verification.
22. User management with **multiple login and registration methods**:
    + Email login and registration (with an optional registration email whitelist) and password reset via email.

@@ -14,10 +14,14 @@ var ModelList = []string{
	"qwen2-72b-instruct", "qwen2-57b-a14b-instruct", "qwen2-7b-instruct", "qwen2-1.5b-instruct", "qwen2-0.5b-instruct",
	"qwen1.5-110b-chat", "qwen1.5-72b-chat", "qwen1.5-32b-chat", "qwen1.5-14b-chat", "qwen1.5-7b-chat", "qwen1.5-1.8b-chat", "qwen1.5-0.5b-chat",
	"qwen-72b-chat", "qwen-14b-chat", "qwen-7b-chat", "qwen-1.8b-chat", "qwen-1.8b-longcontext-chat",
	"qvq-72b-preview",
	"qwen2.5-vl-72b-instruct", "qwen2.5-vl-7b-instruct", "qwen2.5-vl-2b-instruct", "qwen2.5-vl-1b-instruct", "qwen2.5-vl-0.5b-instruct",
	"qwen2-vl-7b-instruct", "qwen2-vl-2b-instruct", "qwen-vl-v1", "qwen-vl-chat-v1",
	"qwen2-audio-instruct", "qwen-audio-chat",
	"qwen2.5-math-72b-instruct", "qwen2.5-math-7b-instruct", "qwen2.5-math-1.5b-instruct", "qwen2-math-72b-instruct", "qwen2-math-7b-instruct", "qwen2-math-1.5b-instruct",
	"qwen2.5-coder-32b-instruct", "qwen2.5-coder-14b-instruct", "qwen2.5-coder-7b-instruct", "qwen2.5-coder-3b-instruct", "qwen2.5-coder-1.5b-instruct", "qwen2.5-coder-0.5b-instruct",
	"text-embedding-v1", "text-embedding-v3", "text-embedding-v2", "text-embedding-async-v2", "text-embedding-async-v1",
	"ali-stable-diffusion-xl", "ali-stable-diffusion-v1.5", "wanx-v1",
	"qwen-mt-plus", "qwen-mt-turbo",
	"deepseek-r1", "deepseek-v3", "deepseek-r1-distill-qwen-1.5b", "deepseek-r1-distill-qwen-7b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b", "deepseek-r1-distill-llama-8b", "deepseek-r1-distill-llama-70b",
}

@@ -5,9 +5,10 @@ import (
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/helper"
	channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"

@@ -20,17 +21,12 @@ type Adaptor struct {
}

func (a *Adaptor) Init(meta *meta.Meta) {

}

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
	var defaultVersion string
	switch meta.ActualModelName {
	case "gemini-2.0-flash-exp",
		"gemini-2.0-flash-thinking-exp",
		"gemini-2.0-flash-thinking-exp-01-21":
		defaultVersion = "v1beta"
	default:
	defaultVersion := config.GeminiVersion
	if strings.Contains(meta.ActualModelName, "gemini-2.0") ||
		strings.Contains(meta.ActualModelName, "gemini-1.5") {
		defaultVersion = "v1beta"
	}

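In this hunk the per-model switch is replaced by a substring check: any model whose name contains `gemini-2.0` or `gemini-1.5` is routed to the `v1beta` API version, while everything else keeps the configured default from `config.GeminiVersion`. A minimal standalone sketch of that selection logic, with a hard-coded `"v1"` standing in for the configured default (an assumption for illustration only):

```go
package main

import (
	"fmt"
	"strings"
)

// pickGeminiVersion mirrors the version selection above: gemini-1.5* and
// gemini-2.0* models are sent to the v1beta endpoint, everything else uses
// the configured default ("v1" is assumed here in place of config.GeminiVersion).
func pickGeminiVersion(model string) string {
	version := "v1"
	if strings.Contains(model, "gemini-2.0") || strings.Contains(model, "gemini-1.5") {
		version = "v1beta"
	}
	return version
}

func main() {
	for _, m := range []string{"gemini-pro", "gemini-1.5-flash", "gemini-2.0-flash-exp"} {
		fmt.Printf("%s -> %s\n", m, pickGeminiVersion(m))
	}
}
```
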
@@ -1,11 +1,35 @@
package gemini

import (
	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
)

// https://ai.google.dev/models/gemini

var ModelList = []string{
	"gemini-pro", "gemini-1.0-pro",
	"gemini-1.5-flash", "gemini-1.5-pro",
	"text-embedding-004", "aqa",
	"gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
var ModelList = geminiv2.ModelList

// ModelsSupportSystemInstruction is the list of models that support system instruction.
//
// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions
var ModelsSupportSystemInstruction = []string{
	// "gemini-1.0-pro-002",
	// "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
	// "gemini-1.5-flash-8b",
	// "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
	// "gemini-1.5-pro-experimental",
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp-01-21",
}

// IsModelSupportSystemInstruction check if the model support system instruction.
//
// Because the main version of Go is 1.20, slice.Contains cannot be used
func IsModelSupportSystemInstruction(model string) bool {
	for _, m := range ModelsSupportSystemInstruction {
		if m == model {
			return true
		}
	}

	return false
}

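The explicit loop in `IsModelSupportSystemInstruction` exists because, per the comment, the module still targets Go 1.20. On Go 1.21 or newer the same check could be written with the standard library's `slices` package; a hypothetical equivalent, not part of this compare:

```go
package gemini

import "slices"

// Same list as in the hunk above, trimmed to the uncommented entries.
var ModelsSupportSystemInstruction = []string{
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp-01-21",
}

// IsModelSupportSystemInstruction rewritten with slices.Contains (Go 1.21+).
func IsModelSupportSystemInstruction(model string) bool {
	return slices.Contains(ModelsSupportSystemInstruction, model)
}
```
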
@@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
		}
		// Converting system prompt to prompt from user for the same reason
		if content.Role == "system" {
			content.Role = "user"
			shouldAddDummyModelMessage = true
			if IsModelSupportSystemInstruction(textRequest.Model) {
				geminiRequest.SystemInstruction = &content
				geminiRequest.SystemInstruction.Role = ""
				continue
			} else {
				content.Role = "user"
			}
		}

		geminiRequest.Contents = append(geminiRequest.Contents, content)

		// If a system message is the last message, we need to add a dummy model message to make gemini happy

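The effect of this change is that a `system` message is now promoted to Gemini's native `system_instruction` field when the target model supports it, and only downgraded to a `user` message otherwise. A simplified, self-contained sketch of that branching; the types and the support check here are stand-ins for illustration, not the project's real ones:

```go
package main

import "fmt"

// Stand-ins for the request types used in the hunk above.
type Content struct {
	Role string
	Text string
}

type ChatRequest struct {
	SystemInstruction *Content
	Contents          []Content
}

// supportsSystemInstruction is a stand-in for IsModelSupportSystemInstruction.
func supportsSystemInstruction(model string) bool {
	return model == "gemini-2.0-flash"
}

// convert either lifts a "system" message into SystemInstruction or
// downgrades it to a plain user message, mirroring the branch above.
func convert(model string, messages []Content) ChatRequest {
	var req ChatRequest
	for _, msg := range messages {
		if msg.Role == "system" {
			if supportsSystemInstruction(model) {
				m := msg
				m.Role = "" // system_instruction carries no role
				req.SystemInstruction = &m
				continue
			}
			msg.Role = "user"
		}
		req.Contents = append(req.Contents, msg)
	}
	return req
}

func main() {
	msgs := []Content{{Role: "system", Text: "Be terse."}, {Role: "user", Text: "Hi"}}
	a := convert("gemini-2.0-flash", msgs)
	b := convert("gemini-pro", msgs)
	fmt.Println(a.SystemInstruction != nil, len(a.Contents)) // true 1
	fmt.Println(b.SystemInstruction != nil, len(b.Contents)) // false 2
}
```
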
@@ -5,6 +5,7 @@ type ChatRequest struct {
	SafetySettings    []ChatSafetySettings `json:"safety_settings,omitempty"`
	GenerationConfig  ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools             []ChatTools          `json:"tools,omitempty"`
	SystemInstruction *ChatContent         `json:"system_instruction,omitempty"`
}

type EmbeddingRequest struct {

relay/adaptor/geminiv2/constants.go — Normal file, 15 lines added

@@ -0,0 +1,15 @@
package geminiv2

// https://ai.google.dev/models/gemini

var ModelList = []string{
	"gemini-pro", "gemini-1.0-pro",
	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
	"gemini-1.5-flash", "gemini-1.5-flash-8b",
	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
	"text-embedding-004", "aqa",
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-lite-preview-02-05",
	"gemini-2.0-flash-thinking-exp-01-21",
	"gemini-2.0-pro-exp-02-05",
}

relay/adaptor/geminiv2/main.go — Normal file, 14 lines added

@@ -0,0 +1,14 @@
package geminiv2

import (
	"fmt"
	"strings"

	"github.com/songquanpeng/one-api/relay/meta"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
	baseURL := strings.TrimSuffix(meta.BaseURL, "/")
	requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1")
	return fmt.Sprintf("%s%s", baseURL, requestPath), nil
}

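The new helper simply strips a trailing slash from the channel's base URL and the `/v1` prefix from the incoming OpenAI-style path before concatenating them. Assuming the Gemini OpenAI-compatible base URL added later in this compare (`https://generativelanguage.googleapis.com/v1beta/openai/`), a request to `/v1/chat/completions` would resolve as in this standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// joinGeminiURL reproduces geminiv2.GetRequestURL without the project's
// meta.Meta type: trim the trailing slash from the base URL, drop the
// OpenAI-style "/v1" prefix from the request path, then concatenate.
func joinGeminiURL(baseURL, requestPath string) string {
	base := strings.TrimSuffix(baseURL, "/")
	path := strings.TrimPrefix(requestPath, "/v1")
	return fmt.Sprintf("%s%s", base, path)
}

func main() {
	// Base URL taken from the ChannelBaseURLs entry added in this compare (index 51).
	base := "https://generativelanguage.googleapis.com/v1beta/openai/"
	fmt.Println(joinGeminiURL(base, "/v1/chat/completions"))
	// Output: https://generativelanguage.googleapis.com/v1beta/openai/chat/completions
}
```
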
@@ -13,6 +13,7 @@ import (
	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
	"github.com/songquanpeng/one-api/relay/adaptor/novita"
	"github.com/songquanpeng/one-api/relay/channeltype"

@@ -59,6 +60,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
		return baiduv2.GetRequestURL(meta)
	case channeltype.AliBailian:
		return alibailian.GetRequestURL(meta)
	case channeltype.GeminiOpenAICompatible:
		return geminiv2.GetRequestURL(meta)
	default:
		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
	}

@@ -7,6 +7,7 @@ import (
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/deepseek"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/groq"
	"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"

@@ -82,6 +83,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
		return "openrouter", openrouter.ModelList
	case channeltype.AliBailian:
		return "alibailian", alibailian.ModelList
	case channeltype.GeminiOpenAICompatible:
		return "geminiv2", geminiv2.ModelList
	default:
		return "openai", ModelList
	}

@@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int)
}

func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
	if channelType == channeltype.OpenAICompatible {
		return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1"))
	}
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {

@@ -1,20 +1,235 @@
package openrouter

var ModelList = []string{
	"openai/gpt-3.5-turbo",
	"openai/chatgpt-4o-latest",
	"openai/o1",
	"openai/o1-preview",
	"openai/o1-mini",
	"openai/o3-mini",
	"google/gemini-2.0-flash-001",
	"google/gemini-2.0-flash-thinking-exp:free",
	"google/gemini-2.0-flash-lite-preview-02-05:free",
	"google/gemini-2.0-pro-exp-02-05:free",
	"google/gemini-flash-1.5-8b",
	"anthropic/claude-3.5-sonnet",
	"01-ai/yi-large",
	"aetherwiing/mn-starcannon-12b",
	"ai21/jamba-1-5-large",
	"ai21/jamba-1-5-mini",
	"ai21/jamba-instruct",
	"aion-labs/aion-1.0",
	"aion-labs/aion-1.0-mini",
	"aion-labs/aion-rp-llama-3.1-8b",
	"allenai/llama-3.1-tulu-3-405b",
	"alpindale/goliath-120b",
	"alpindale/magnum-72b",
	"amazon/nova-lite-v1",
	"amazon/nova-micro-v1",
	"amazon/nova-pro-v1",
	"anthracite-org/magnum-v2-72b",
	"anthracite-org/magnum-v4-72b",
	"anthropic/claude-2",
	"anthropic/claude-2.0",
	"anthropic/claude-2.0:beta",
	"anthropic/claude-2.1",
	"anthropic/claude-2.1:beta",
	"anthropic/claude-2:beta",
	"anthropic/claude-3-haiku",
	"anthropic/claude-3-haiku:beta",
	"anthropic/claude-3-opus",
	"anthropic/claude-3-opus:beta",
	"anthropic/claude-3-sonnet",
	"anthropic/claude-3-sonnet:beta",
	"anthropic/claude-3.5-haiku",
	"deepseek/deepseek-r1:free",
	"anthropic/claude-3.5-haiku-20241022",
	"anthropic/claude-3.5-haiku-20241022:beta",
	"anthropic/claude-3.5-haiku:beta",
	"anthropic/claude-3.5-sonnet",
	"anthropic/claude-3.5-sonnet-20240620",
	"anthropic/claude-3.5-sonnet-20240620:beta",
	"anthropic/claude-3.5-sonnet:beta",
	"cognitivecomputations/dolphin-mixtral-8x22b",
	"cognitivecomputations/dolphin-mixtral-8x7b",
	"cohere/command",
	"cohere/command-r",
	"cohere/command-r-03-2024",
	"cohere/command-r-08-2024",
	"cohere/command-r-plus",
	"cohere/command-r-plus-04-2024",
	"cohere/command-r-plus-08-2024",
	"cohere/command-r7b-12-2024",
	"databricks/dbrx-instruct",
	"deepseek/deepseek-chat",
	"deepseek/deepseek-chat-v2.5",
	"deepseek/deepseek-chat:free",
	"deepseek/deepseek-r1",
	"deepseek/deepseek-r1-distill-llama-70b",
	"deepseek/deepseek-r1-distill-llama-70b:free",
	"deepseek/deepseek-r1-distill-llama-8b",
	"deepseek/deepseek-r1-distill-qwen-1.5b",
	"deepseek/deepseek-r1-distill-qwen-14b",
	"deepseek/deepseek-r1-distill-qwen-32b",
	"deepseek/deepseek-r1:free",
	"eva-unit-01/eva-llama-3.33-70b",
	"eva-unit-01/eva-qwen-2.5-32b",
	"eva-unit-01/eva-qwen-2.5-72b",
	"google/gemini-2.0-flash-001",
	"google/gemini-2.0-flash-exp:free",
	"google/gemini-2.0-flash-lite-preview-02-05:free",
	"google/gemini-2.0-flash-thinking-exp-1219:free",
	"google/gemini-2.0-flash-thinking-exp:free",
	"google/gemini-2.0-pro-exp-02-05:free",
	"google/gemini-exp-1206:free",
	"google/gemini-flash-1.5",
	"google/gemini-flash-1.5-8b",
	"google/gemini-flash-1.5-8b-exp",
	"google/gemini-pro",
	"google/gemini-pro-1.5",
	"google/gemini-pro-vision",
	"google/gemma-2-27b-it",
	"google/gemma-2-9b-it",
	"google/gemma-2-9b-it:free",
	"google/gemma-7b-it",
	"google/learnlm-1.5-pro-experimental:free",
	"google/palm-2-chat-bison",
	"google/palm-2-chat-bison-32k",
	"google/palm-2-codechat-bison",
	"google/palm-2-codechat-bison-32k",
	"gryphe/mythomax-l2-13b",
	"gryphe/mythomax-l2-13b:free",
	"huggingfaceh4/zephyr-7b-beta:free",
	"infermatic/mn-inferor-12b",
	"inflection/inflection-3-pi",
	"inflection/inflection-3-productivity",
	"jondurbin/airoboros-l2-70b",
	"liquid/lfm-3b",
	"liquid/lfm-40b",
	"liquid/lfm-7b",
	"mancer/weaver",
	"meta-llama/llama-2-13b-chat",
	"meta-llama/llama-2-70b-chat",
	"meta-llama/llama-3-70b-instruct",
	"meta-llama/llama-3-8b-instruct",
	"meta-llama/llama-3-8b-instruct:free",
	"meta-llama/llama-3.1-405b",
	"meta-llama/llama-3.1-405b-instruct",
	"meta-llama/llama-3.1-70b-instruct",
	"meta-llama/llama-3.1-8b-instruct",
	"meta-llama/llama-3.2-11b-vision-instruct",
	"meta-llama/llama-3.2-11b-vision-instruct:free",
	"meta-llama/llama-3.2-1b-instruct",
	"meta-llama/llama-3.2-3b-instruct",
	"meta-llama/llama-3.2-90b-vision-instruct",
	"meta-llama/llama-3.3-70b-instruct",
	"meta-llama/llama-3.3-70b-instruct:free",
	"meta-llama/llama-guard-2-8b",
	"microsoft/phi-3-medium-128k-instruct",
	"microsoft/phi-3-medium-128k-instruct:free",
	"microsoft/phi-3-mini-128k-instruct",
	"microsoft/phi-3-mini-128k-instruct:free",
	"microsoft/phi-3.5-mini-128k-instruct",
	"microsoft/phi-4",
	"microsoft/wizardlm-2-7b",
	"microsoft/wizardlm-2-8x22b",
	"minimax/minimax-01",
	"mistralai/codestral-2501",
	"mistralai/codestral-mamba",
	"mistralai/ministral-3b",
	"mistralai/ministral-8b",
	"mistralai/mistral-7b-instruct",
	"mistralai/mistral-7b-instruct-v0.1",
	"mistralai/mistral-7b-instruct-v0.3",
	"mistralai/mistral-7b-instruct:free",
	"mistralai/mistral-large",
	"mistralai/mistral-large-2407",
	"mistralai/mistral-large-2411",
	"mistralai/mistral-medium",
	"mistralai/mistral-nemo",
	"mistralai/mistral-nemo:free",
	"mistralai/mistral-small",
	"mistralai/mistral-small-24b-instruct-2501",
	"mistralai/mistral-small-24b-instruct-2501:free",
	"mistralai/mistral-tiny",
	"mistralai/mixtral-8x22b-instruct",
	"mistralai/mixtral-8x7b",
	"mistralai/mixtral-8x7b-instruct",
	"mistralai/pixtral-12b",
	"mistralai/pixtral-large-2411",
	"neversleep/llama-3-lumimaid-70b",
	"neversleep/llama-3-lumimaid-8b",
	"neversleep/llama-3-lumimaid-8b:extended",
	"neversleep/llama-3.1-lumimaid-70b",
	"neversleep/llama-3.1-lumimaid-8b",
	"neversleep/noromaid-20b",
	"nothingiisreal/mn-celeste-12b",
	"nousresearch/hermes-2-pro-llama-3-8b",
	"nousresearch/hermes-3-llama-3.1-405b",
	"nousresearch/hermes-3-llama-3.1-70b",
	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
	"nousresearch/nous-hermes-llama2-13b",
	"nvidia/llama-3.1-nemotron-70b-instruct",
	"nvidia/llama-3.1-nemotron-70b-instruct:free",
	"openai/chatgpt-4o-latest",
	"openai/gpt-3.5-turbo",
	"openai/gpt-3.5-turbo-0125",
	"openai/gpt-3.5-turbo-0613",
	"openai/gpt-3.5-turbo-1106",
	"openai/gpt-3.5-turbo-16k",
	"openai/gpt-3.5-turbo-instruct",
	"openai/gpt-4",
	"openai/gpt-4-0314",
	"openai/gpt-4-1106-preview",
	"openai/gpt-4-32k",
	"openai/gpt-4-32k-0314",
	"openai/gpt-4-turbo",
	"openai/gpt-4-turbo-preview",
	"openai/gpt-4o",
	"openai/gpt-4o-2024-05-13",
	"openai/gpt-4o-2024-08-06",
	"openai/gpt-4o-2024-11-20",
	"openai/gpt-4o-mini",
	"openai/gpt-4o-mini-2024-07-18",
	"openai/gpt-4o:extended",
	"openai/o1",
	"openai/o1-mini",
	"openai/o1-mini-2024-09-12",
	"openai/o1-preview",
	"openai/o1-preview-2024-09-12",
	"openai/o3-mini",
	"openai/o3-mini-high",
	"openchat/openchat-7b",
	"openchat/openchat-7b:free",
	"openrouter/auto",
	"perplexity/llama-3.1-sonar-huge-128k-online",
	"perplexity/llama-3.1-sonar-large-128k-chat",
	"perplexity/llama-3.1-sonar-large-128k-online",
	"perplexity/llama-3.1-sonar-small-128k-chat",
	"perplexity/llama-3.1-sonar-small-128k-online",
	"perplexity/sonar",
	"perplexity/sonar-reasoning",
	"pygmalionai/mythalion-13b",
	"qwen/qvq-72b-preview",
	"qwen/qwen-2-72b-instruct",
	"qwen/qwen-2-7b-instruct",
	"qwen/qwen-2-7b-instruct:free",
	"qwen/qwen-2-vl-72b-instruct",
	"qwen/qwen-2-vl-7b-instruct",
	"qwen/qwen-2.5-72b-instruct",
	"qwen/qwen-2.5-7b-instruct",
	"qwen/qwen-2.5-coder-32b-instruct",
	"qwen/qwen-max",
	"qwen/qwen-plus",
	"qwen/qwen-turbo",
	"qwen/qwen-vl-plus:free",
	"qwen/qwen2.5-vl-72b-instruct:free",
	"qwen/qwq-32b-preview",
	"raifle/sorcererlm-8x22b",
	"sao10k/fimbulvetr-11b-v2",
	"sao10k/l3-euryale-70b",
	"sao10k/l3-lunaris-8b",
	"sao10k/l3.1-70b-hanami-x1",
	"sao10k/l3.1-euryale-70b",
	"sao10k/l3.3-euryale-70b",
	"sophosympatheia/midnight-rose-70b",
	"sophosympatheia/rogue-rose-103b-v0.2:free",
	"teknium/openhermes-2.5-mistral-7b",
	"thedrummer/rocinante-12b",
	"thedrummer/unslopnemo-12b",
	"undi95/remm-slerp-l2-13b",
	"undi95/toppy-m-7b",
	"undi95/toppy-m-7b:free",
	"x-ai/grok-2-1212",
	"x-ai/grok-2-vision-1212",
	"x-ai/grok-beta",
	"x-ai/grok-vision-beta",
	"xwin-lm/xwin-lm-70b",
}

@@ -16,10 +16,12 @@ import (

var ModelList = []string{
	"gemini-pro", "gemini-pro-vision",
	"gemini-1.5-pro-001", "gemini-1.5-flash-001",
	"gemini-1.5-pro-002", "gemini-1.5-flash-002",
	"gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
	"gemini-exp-1206",
	"gemini-1.5-pro-001", "gemini-1.5-pro-002",
	"gemini-1.5-flash-001", "gemini-1.5-flash-002",
	"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
	"gemini-2.0-flash-lite-preview-02-05",
	"gemini-2.0-flash-thinking-exp-01-21",
}

type Adaptor struct {

@@ -59,6 +59,8 @@ var ModelRatio = map[string]float64{
	"o1-preview-2024-09-12":   7.5,
	"o1-mini":                 1.5, // $3.00 / 1M input tokens
	"o1-mini-2024-09-12":      1.5,
	"o3-mini":                 1.5, // $3.00 / 1M input tokens
	"o3-mini-2025-01-31":      1.5,
	"davinci-002":             1,   // $0.002 / 1K tokens
	"babbage-002":             0.2, // $0.0004 / 1K tokens
	"text-ada-001":            0.2,

@@ -115,15 +117,24 @@
	"bge-large-en":       0.002 * RMB,
	"tao-8k":             0.002 * RMB,
	// https://ai.google.dev/pricing
	"gemini-pro":                          1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
	"gemini-1.0-pro":                      1,
	"gemini-1.5-pro":                      1,
	"gemini-1.5-pro-001":                  1,
	"gemini-1.5-flash":                    1,
	"gemini-1.5-flash-001":                1,
	"gemini-2.0-flash-exp":                1,
	"gemini-2.0-flash-thinking-exp":       1,
	"gemini-2.0-flash-thinking-exp-01-21": 1,
	// https://cloud.google.com/vertex-ai/generative-ai/pricing
	// "gemma-2-2b-it":                       0,
	// "gemma-2-9b-it":                       0,
	// "gemma-2-27b-it":                      0,
	"gemini-pro":                          0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens
	"gemini-1.0-pro":                      0.125 * MILLI_USD,
	"gemini-1.5-pro":                      1.25 * MILLI_USD,
	"gemini-1.5-pro-001":                  1.25 * MILLI_USD,
	"gemini-1.5-pro-experimental":         1.25 * MILLI_USD,
	"gemini-1.5-flash":                    0.075 * MILLI_USD,
	"gemini-1.5-flash-001":                0.075 * MILLI_USD,
	"gemini-1.5-flash-8b":                 0.0375 * MILLI_USD,
	"gemini-2.0-flash-exp":                0.075 * MILLI_USD,
	"gemini-2.0-flash":                    0.15 * MILLI_USD,
	"gemini-2.0-flash-001":                0.15 * MILLI_USD,
	"gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD,
	"gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD,
	"gemini-2.0-pro-exp-02-05":            1.25 * MILLI_USD,
	"aqa":                                 1,
	// https://open.bigmodel.cn/pricing
	"glm-zero-preview": 0.01 * RMB,

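One-api's ratio convention sets a ratio of 1 equal to $0.002 per 1K tokens (visible in comments such as `"davinci-002": 1, // $0.002 / 1K tokens` above), so `MILLI_USD` and `RMB` helpers let an entry read directly as dollars per 1M tokens or yuan per 1K tokens. A small sketch of that arithmetic; the constant values here are assumptions derived from that convention, not quoted from this compare:

```go
package main

import "fmt"

// Assumed ratio constants following the convention that ratio 1 = $0.002 / 1K tokens.
const (
	QuotaPerUSD = 500.0                  // ratio units corresponding to $1 per 1K tokens
	MILLI_USD   = QuotaPerUSD / 1000     // so "N * MILLI_USD" reads as $N per 1M tokens
	USD2RMB     = 7.0                    // assumed USD/RMB conversion
	RMB         = QuotaPerUSD / USD2RMB  // so "N * RMB" reads as ¥N per 1K tokens
)

// usdPerMillionTokens converts a ratio back to an input price in $ per 1M tokens.
func usdPerMillionTokens(ratio float64) float64 {
	return ratio * 2.0 // ratio 1 = $0.002 / 1K tokens = $2 / 1M tokens
}

func main() {
	geminiFlash := 0.075 * MILLI_USD // entry from the hunk above
	qwenPlus := 0.0008 * RMB         // entry from the Qwen hunk below
	fmt.Printf("gemini-1.5-flash ratio %.4f -> $%.3f per 1M tokens\n", geminiFlash, usdPerMillionTokens(geminiFlash))
	fmt.Printf("qwen-plus ratio %.4f -> $%.3f per 1M tokens (about ¥0.8)\n", qwenPlus, usdPerMillionTokens(qwenPlus))
}
```
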
@@ -150,79 +161,93 @@
	"embedding-2": 0.0005 * RMB,
	"embedding-3": 0.0005 * RMB,
	// https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
	"qwen-turbo": 1.4286, // ¥0.02 / 1k tokens
	"qwen-turbo-latest": 1.4286,
	"qwen-plus": 1.4286,
	"qwen-plus-latest": 1.4286,
	"qwen-max": 1.4286,
	"qwen-max-latest": 1.4286,
	"qwen-max-longcontext": 1.4286,
	"qwen-vl-max": 1.4286,
	"qwen-vl-max-latest": 1.4286,
	"qwen-vl-plus": 1.4286,
	"qwen-vl-plus-latest": 1.4286,
	"qwen-vl-ocr": 1.4286,
	"qwen-vl-ocr-latest": 1.4286,
	"qwen-turbo": 0.0003 * RMB,
	"qwen-turbo-latest": 0.0003 * RMB,
	"qwen-plus": 0.0008 * RMB,
	"qwen-plus-latest": 0.0008 * RMB,
	"qwen-max": 0.0024 * RMB,
	"qwen-max-latest": 0.0024 * RMB,
	"qwen-max-longcontext": 0.0005 * RMB,
	"qwen-vl-max": 0.003 * RMB,
	"qwen-vl-max-latest": 0.003 * RMB,
	"qwen-vl-plus": 0.0015 * RMB,
	"qwen-vl-plus-latest": 0.0015 * RMB,
	"qwen-vl-ocr": 0.005 * RMB,
	"qwen-vl-ocr-latest": 0.005 * RMB,
	"qwen-audio-turbo": 1.4286,
	"qwen-math-plus": 1.4286,
	"qwen-math-plus-latest": 1.4286,
	"qwen-math-turbo": 1.4286,
	"qwen-math-turbo-latest": 1.4286,
	"qwen-coder-plus": 1.4286,
	"qwen-coder-plus-latest": 1.4286,
	"qwen-coder-turbo": 1.4286,
	"qwen-coder-turbo-latest": 1.4286,
	"qwq-32b-preview": 1.4286,
	"qwen2.5-72b-instruct": 1.4286,
	"qwen2.5-32b-instruct": 1.4286,
	"qwen2.5-14b-instruct": 1.4286,
	"qwen2.5-7b-instruct": 1.4286,
	"qwen2.5-3b-instruct": 1.4286,
	"qwen2.5-1.5b-instruct": 1.4286,
	"qwen2.5-0.5b-instruct": 1.4286,
	"qwen2-72b-instruct": 1.4286,
	"qwen2-57b-a14b-instruct": 1.4286,
	"qwen2-7b-instruct": 1.4286,
	"qwen2-1.5b-instruct": 1.4286,
	"qwen2-0.5b-instruct": 1.4286,
	"qwen1.5-110b-chat": 1.4286,
	"qwen1.5-72b-chat": 1.4286,
	"qwen1.5-32b-chat": 1.4286,
	"qwen1.5-14b-chat": 1.4286,
	"qwen1.5-7b-chat": 1.4286,
	"qwen1.5-1.8b-chat": 1.4286,
	"qwen1.5-0.5b-chat": 1.4286,
	"qwen-72b-chat": 1.4286,
	"qwen-14b-chat": 1.4286,
	"qwen-7b-chat": 1.4286,
	"qwen-1.8b-chat": 1.4286,
	"qwen-1.8b-longcontext-chat": 1.4286,
	"qwen2-vl-7b-instruct": 1.4286,
	"qwen2-vl-2b-instruct": 1.4286,
	"qwen-vl-v1": 1.4286,
	"qwen-vl-chat-v1": 1.4286,
	"qwen2-audio-instruct": 1.4286,
	"qwen-audio-chat": 1.4286,
	"qwen2.5-math-72b-instruct": 1.4286,
	"qwen2.5-math-7b-instruct": 1.4286,
	"qwen2.5-math-1.5b-instruct": 1.4286,
	"qwen2-math-72b-instruct": 1.4286,
	"qwen2-math-7b-instruct": 1.4286,
	"qwen2-math-1.5b-instruct": 1.4286,
	"qwen2.5-coder-32b-instruct": 1.4286,
	"qwen2.5-coder-14b-instruct": 1.4286,
	"qwen2.5-coder-7b-instruct": 1.4286,
	"qwen2.5-coder-3b-instruct": 1.4286,
	"qwen2.5-coder-1.5b-instruct": 1.4286,
	"qwen2.5-coder-0.5b-instruct": 1.4286,
	"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
	"text-embedding-v3": 0.05,
	"text-embedding-v2": 0.05,
	"text-embedding-async-v2": 0.05,
	"text-embedding-async-v1": 0.05,
	"qwen-math-plus": 0.004 * RMB,
	"qwen-math-plus-latest": 0.004 * RMB,
	"qwen-math-turbo": 0.002 * RMB,
	"qwen-math-turbo-latest": 0.002 * RMB,
	"qwen-coder-plus": 0.0035 * RMB,
	"qwen-coder-plus-latest": 0.0035 * RMB,
	"qwen-coder-turbo": 0.002 * RMB,
	"qwen-coder-turbo-latest": 0.002 * RMB,
	"qwen-mt-plus": 0.015 * RMB,
	"qwen-mt-turbo": 0.001 * RMB,
	"qwq-32b-preview": 0.002 * RMB,
	"qwen2.5-72b-instruct": 0.004 * RMB,
	"qwen2.5-32b-instruct": 0.03 * RMB,
	"qwen2.5-14b-instruct": 0.001 * RMB,
	"qwen2.5-7b-instruct": 0.0005 * RMB,
	"qwen2.5-3b-instruct": 0.006 * RMB,
	"qwen2.5-1.5b-instruct": 0.0003 * RMB,
	"qwen2.5-0.5b-instruct": 0.0003 * RMB,
	"qwen2-72b-instruct": 0.004 * RMB,
	"qwen2-57b-a14b-instruct": 0.0035 * RMB,
	"qwen2-7b-instruct": 0.001 * RMB,
	"qwen2-1.5b-instruct": 0.001 * RMB,
	"qwen2-0.5b-instruct": 0.001 * RMB,
	"qwen1.5-110b-chat": 0.007 * RMB,
	"qwen1.5-72b-chat": 0.005 * RMB,
	"qwen1.5-32b-chat": 0.0035 * RMB,
	"qwen1.5-14b-chat": 0.002 * RMB,
	"qwen1.5-7b-chat": 0.001 * RMB,
	"qwen1.5-1.8b-chat": 0.001 * RMB,
	"qwen1.5-0.5b-chat": 0.001 * RMB,
	"qwen-72b-chat": 0.02 * RMB,
	"qwen-14b-chat": 0.008 * RMB,
	"qwen-7b-chat": 0.006 * RMB,
	"qwen-1.8b-chat": 0.006 * RMB,
	"qwen-1.8b-longcontext-chat": 0.006 * RMB,
	"qvq-72b-preview": 0.012 * RMB,
	"qwen2.5-vl-72b-instruct": 0.016 * RMB,
	"qwen2.5-vl-7b-instruct": 0.002 * RMB,
	"qwen2.5-vl-3b-instruct": 0.0012 * RMB,
	"qwen2-vl-7b-instruct": 0.016 * RMB,
	"qwen2-vl-2b-instruct": 0.002 * RMB,
	"qwen-vl-v1": 0.002 * RMB,
	"qwen-vl-chat-v1": 0.002 * RMB,
	"qwen2-audio-instruct": 0.002 * RMB,
	"qwen-audio-chat": 0.002 * RMB,
	"qwen2.5-math-72b-instruct": 0.004 * RMB,
	"qwen2.5-math-7b-instruct": 0.001 * RMB,
	"qwen2.5-math-1.5b-instruct": 0.001 * RMB,
	"qwen2-math-72b-instruct": 0.004 * RMB,
	"qwen2-math-7b-instruct": 0.001 * RMB,
	"qwen2-math-1.5b-instruct": 0.001 * RMB,
	"qwen2.5-coder-32b-instruct": 0.002 * RMB,
	"qwen2.5-coder-14b-instruct": 0.002 * RMB,
	"qwen2.5-coder-7b-instruct": 0.001 * RMB,
	"qwen2.5-coder-3b-instruct": 0.001 * RMB,
	"qwen2.5-coder-1.5b-instruct": 0.001 * RMB,
	"qwen2.5-coder-0.5b-instruct": 0.001 * RMB,
	"text-embedding-v1": 0.0007 * RMB, // ¥0.0007 / 1k tokens
	"text-embedding-v3": 0.0007 * RMB,
	"text-embedding-v2": 0.0007 * RMB,
	"text-embedding-async-v2": 0.0007 * RMB,
	"text-embedding-async-v1": 0.0007 * RMB,
	"ali-stable-diffusion-xl": 8.00,
	"ali-stable-diffusion-v1.5": 8.00,
	"wanx-v1": 8.00,
	"deepseek-r1": 0.002 * RMB,
	"deepseek-v3": 0.001 * RMB,
	"deepseek-r1-distill-qwen-1.5b": 0.001 * RMB,
	"deepseek-r1-distill-qwen-7b": 0.0005 * RMB,
	"deepseek-r1-distill-qwen-14b": 0.001 * RMB,
	"deepseek-r1-distill-qwen-32b": 0.002 * RMB,
	"deepseek-r1-distill-llama-8b": 0.0005 * RMB,
	"deepseek-r1-distill-llama-70b": 0.004 * RMB,
	"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
	"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
	"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens

@@ -362,6 +387,238 @@ var ModelRatio = map[string]float64{
	"mistralai/mistral-7b-instruct-v0.2": 0.050 * USD,
	"mistralai/mistral-7b-v0.1": 0.050 * USD,
	"mistralai/mixtral-8x7b-instruct-v0.1": 0.300 * USD,
	//https://openrouter.ai/models
	"01-ai/yi-large": 1.5,
	"aetherwiing/mn-starcannon-12b": 0.6,
	"ai21/jamba-1-5-large": 4.0,
	"ai21/jamba-1-5-mini": 0.2,
	"ai21/jamba-instruct": 0.35,
	"aion-labs/aion-1.0": 6.0,
	"aion-labs/aion-1.0-mini": 1.2,
	"aion-labs/aion-rp-llama-3.1-8b": 0.1,
	"allenai/llama-3.1-tulu-3-405b": 5.0,
	"alpindale/goliath-120b": 4.6875,
	"alpindale/magnum-72b": 1.125,
	"amazon/nova-lite-v1": 0.12,
	"amazon/nova-micro-v1": 0.07,
	"amazon/nova-pro-v1": 1.6,
	"anthracite-org/magnum-v2-72b": 1.5,
	"anthracite-org/magnum-v4-72b": 1.125,
	"anthropic/claude-2": 12.0,
	"anthropic/claude-2.0": 12.0,
	"anthropic/claude-2.0:beta": 12.0,
	"anthropic/claude-2.1": 12.0,
	"anthropic/claude-2.1:beta": 12.0,
	"anthropic/claude-2:beta": 12.0,
	"anthropic/claude-3-haiku": 0.625,
	"anthropic/claude-3-haiku:beta": 0.625,
	"anthropic/claude-3-opus": 37.5,
	"anthropic/claude-3-opus:beta": 37.5,
	"anthropic/claude-3-sonnet": 7.5,
	"anthropic/claude-3-sonnet:beta": 7.5,
	"anthropic/claude-3.5-haiku": 2.0,
	"anthropic/claude-3.5-haiku-20241022": 2.0,
	"anthropic/claude-3.5-haiku-20241022:beta": 2.0,
	"anthropic/claude-3.5-haiku:beta": 2.0,
	"anthropic/claude-3.5-sonnet": 7.5,
	"anthropic/claude-3.5-sonnet-20240620": 7.5,
	"anthropic/claude-3.5-sonnet-20240620:beta": 7.5,
	"anthropic/claude-3.5-sonnet:beta": 7.5,
	"cognitivecomputations/dolphin-mixtral-8x22b": 0.45,
	"cognitivecomputations/dolphin-mixtral-8x7b": 0.25,
	"cohere/command": 0.95,
	"cohere/command-r": 0.7125,
	"cohere/command-r-03-2024": 0.7125,
	"cohere/command-r-08-2024": 0.285,
	"cohere/command-r-plus": 7.125,
	"cohere/command-r-plus-04-2024": 7.125,
	"cohere/command-r-plus-08-2024": 4.75,
	"cohere/command-r7b-12-2024": 0.075,
	"databricks/dbrx-instruct": 0.6,
	"deepseek/deepseek-chat": 0.445,
	"deepseek/deepseek-chat-v2.5": 1.0,
	"deepseek/deepseek-chat:free": 0.0,
	"deepseek/deepseek-r1": 1.2,
	"deepseek/deepseek-r1-distill-llama-70b": 0.345,
	"deepseek/deepseek-r1-distill-llama-70b:free": 0.0,
	"deepseek/deepseek-r1-distill-llama-8b": 0.02,
	"deepseek/deepseek-r1-distill-qwen-1.5b": 0.09,
	"deepseek/deepseek-r1-distill-qwen-14b": 0.075,
	"deepseek/deepseek-r1-distill-qwen-32b": 0.09,
	"deepseek/deepseek-r1:free": 0.0,
	"eva-unit-01/eva-llama-3.33-70b": 3.0,
	"eva-unit-01/eva-qwen-2.5-32b": 1.7,
	"eva-unit-01/eva-qwen-2.5-72b": 3.0,
	"google/gemini-2.0-flash-001": 0.2,
	"google/gemini-2.0-flash-exp:free": 0.0,
	"google/gemini-2.0-flash-lite-preview-02-05:free": 0.0,
	"google/gemini-2.0-flash-thinking-exp-1219:free": 0.0,
	"google/gemini-2.0-flash-thinking-exp:free": 0.0,
	"google/gemini-2.0-pro-exp-02-05:free": 0.0,
	"google/gemini-exp-1206:free": 0.0,
	"google/gemini-flash-1.5": 0.15,
	"google/gemini-flash-1.5-8b": 0.075,
	"google/gemini-flash-1.5-8b-exp": 0.0,
	"google/gemini-pro": 0.75,
	"google/gemini-pro-1.5": 2.5,
	"google/gemini-pro-vision": 0.75,
	"google/gemma-2-27b-it": 0.135,
	"google/gemma-2-9b-it": 0.03,
	"google/gemma-2-9b-it:free": 0.0,
	"google/gemma-7b-it": 0.075,
	"google/learnlm-1.5-pro-experimental:free": 0.0,
	"google/palm-2-chat-bison": 1.0,
	"google/palm-2-chat-bison-32k": 1.0,
	"google/palm-2-codechat-bison": 1.0,
	"google/palm-2-codechat-bison-32k": 1.0,
	"gryphe/mythomax-l2-13b": 0.0325,
	"gryphe/mythomax-l2-13b:free": 0.0,
	"huggingfaceh4/zephyr-7b-beta:free": 0.0,
	"infermatic/mn-inferor-12b": 0.6,
	"inflection/inflection-3-pi": 5.0,
	"inflection/inflection-3-productivity": 5.0,
	"jondurbin/airoboros-l2-70b": 0.25,
	"liquid/lfm-3b": 0.01,
	"liquid/lfm-40b": 0.075,
	"liquid/lfm-7b": 0.005,
	"mancer/weaver": 1.125,
	"meta-llama/llama-2-13b-chat": 0.11,
	"meta-llama/llama-2-70b-chat": 0.45,
	"meta-llama/llama-3-70b-instruct": 0.2,
	"meta-llama/llama-3-8b-instruct": 0.03,
	"meta-llama/llama-3-8b-instruct:free": 0.0,
	"meta-llama/llama-3.1-405b": 1.0,
	"meta-llama/llama-3.1-405b-instruct": 0.4,
	"meta-llama/llama-3.1-70b-instruct": 0.15,
	"meta-llama/llama-3.1-8b-instruct": 0.025,
	"meta-llama/llama-3.2-11b-vision-instruct": 0.0275,
	"meta-llama/llama-3.2-11b-vision-instruct:free": 0.0,
	"meta-llama/llama-3.2-1b-instruct": 0.005,
	"meta-llama/llama-3.2-3b-instruct": 0.0125,
	"meta-llama/llama-3.2-90b-vision-instruct": 0.8,
	"meta-llama/llama-3.3-70b-instruct": 0.15,
	"meta-llama/llama-3.3-70b-instruct:free": 0.0,
	"meta-llama/llama-guard-2-8b": 0.1,
	"microsoft/phi-3-medium-128k-instruct": 0.5,
	"microsoft/phi-3-medium-128k-instruct:free": 0.0,
	"microsoft/phi-3-mini-128k-instruct": 0.05,
	"microsoft/phi-3-mini-128k-instruct:free": 0.0,
	"microsoft/phi-3.5-mini-128k-instruct": 0.05,
	"microsoft/phi-4": 0.07,
	"microsoft/wizardlm-2-7b": 0.035,
	"microsoft/wizardlm-2-8x22b": 0.25,
	"minimax/minimax-01": 0.55,
	"mistralai/codestral-2501": 0.45,
	"mistralai/codestral-mamba": 0.125,
	"mistralai/ministral-3b": 0.02,
	"mistralai/ministral-8b": 0.05,
	"mistralai/mistral-7b-instruct": 0.0275,
	"mistralai/mistral-7b-instruct-v0.1": 0.1,
	"mistralai/mistral-7b-instruct-v0.3": 0.0275,
	"mistralai/mistral-7b-instruct:free": 0.0,
	"mistralai/mistral-large": 3.0,
	"mistralai/mistral-large-2407": 3.0,
	"mistralai/mistral-large-2411": 3.0,
	"mistralai/mistral-medium": 4.05,
	"mistralai/mistral-nemo": 0.04,
	"mistralai/mistral-nemo:free": 0.0,
	"mistralai/mistral-small": 0.3,
	"mistralai/mistral-small-24b-instruct-2501": 0.07,
	"mistralai/mistral-small-24b-instruct-2501:free": 0.0,
	"mistralai/mistral-tiny": 0.125,
	"mistralai/mixtral-8x22b-instruct": 0.45,
	"mistralai/mixtral-8x7b": 0.3,
	"mistralai/mixtral-8x7b-instruct": 0.12,
	"mistralai/pixtral-12b": 0.05,
	"mistralai/pixtral-large-2411": 3.0,
	"neversleep/llama-3-lumimaid-70b": 2.25,
	"neversleep/llama-3-lumimaid-8b": 0.5625,
	"neversleep/llama-3-lumimaid-8b:extended": 0.5625,
	"neversleep/llama-3.1-lumimaid-70b": 2.25,
	"neversleep/llama-3.1-lumimaid-8b": 0.5625,
	"neversleep/noromaid-20b": 1.125,
	"nothingiisreal/mn-celeste-12b": 0.6,
	"nousresearch/hermes-2-pro-llama-3-8b": 0.02,
	"nousresearch/hermes-3-llama-3.1-405b": 0.4,
	"nousresearch/hermes-3-llama-3.1-70b": 0.15,
	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo": 0.3,
	"nousresearch/nous-hermes-llama2-13b": 0.085,
	"nvidia/llama-3.1-nemotron-70b-instruct": 0.15,
	"nvidia/llama-3.1-nemotron-70b-instruct:free": 0.0,
	"openai/chatgpt-4o-latest": 7.5,
	"openai/gpt-3.5-turbo": 0.75,
	"openai/gpt-3.5-turbo-0125": 0.75,
	"openai/gpt-3.5-turbo-0613": 1.0,
	"openai/gpt-3.5-turbo-1106": 1.0,
	"openai/gpt-3.5-turbo-16k": 2.0,
	"openai/gpt-3.5-turbo-instruct": 1.0,
	"openai/gpt-4": 30.0,
	"openai/gpt-4-0314": 30.0,
	"openai/gpt-4-1106-preview": 15.0,
	"openai/gpt-4-32k": 60.0,
	"openai/gpt-4-32k-0314": 60.0,
	"openai/gpt-4-turbo": 15.0,
	"openai/gpt-4-turbo-preview": 15.0,
	"openai/gpt-4o": 5.0,
	"openai/gpt-4o-2024-05-13": 7.5,
	"openai/gpt-4o-2024-08-06": 5.0,
	"openai/gpt-4o-2024-11-20": 5.0,
	"openai/gpt-4o-mini": 0.3,
	"openai/gpt-4o-mini-2024-07-18": 0.3,
	"openai/gpt-4o:extended": 9.0,
	"openai/o1": 30.0,
	"openai/o1-mini": 2.2,
	"openai/o1-mini-2024-09-12": 2.2,
	"openai/o1-preview": 30.0,
	"openai/o1-preview-2024-09-12": 30.0,
	"openai/o3-mini": 2.2,
	"openai/o3-mini-high": 2.2,
	"openchat/openchat-7b": 0.0275,
	"openchat/openchat-7b:free": 0.0,
	"openrouter/auto": -500000.0,
	"perplexity/llama-3.1-sonar-huge-128k-online": 2.5,
	"perplexity/llama-3.1-sonar-large-128k-chat": 0.5,
	"perplexity/llama-3.1-sonar-large-128k-online": 0.5,
	"perplexity/llama-3.1-sonar-small-128k-chat": 0.1,
	"perplexity/llama-3.1-sonar-small-128k-online": 0.1,
	"perplexity/sonar": 0.5,
	"perplexity/sonar-reasoning": 2.5,
	"pygmalionai/mythalion-13b": 0.6,
	"qwen/qvq-72b-preview": 0.25,
	"qwen/qwen-2-72b-instruct": 0.45,
	"qwen/qwen-2-7b-instruct": 0.027,
	"qwen/qwen-2-7b-instruct:free": 0.0,
	"qwen/qwen-2-vl-72b-instruct": 0.2,
	"qwen/qwen-2-vl-7b-instruct": 0.05,
	"qwen/qwen-2.5-72b-instruct": 0.2,
	"qwen/qwen-2.5-7b-instruct": 0.025,
	"qwen/qwen-2.5-coder-32b-instruct": 0.08,
	"qwen/qwen-max": 3.2,
	"qwen/qwen-plus": 0.6,
	"qwen/qwen-turbo": 0.1,
	"qwen/qwen-vl-plus:free": 0.0,
	"qwen/qwen2.5-vl-72b-instruct:free": 0.0,
	"qwen/qwq-32b-preview": 0.09,
	"raifle/sorcererlm-8x22b": 2.25,
	"sao10k/fimbulvetr-11b-v2": 0.6,
	"sao10k/l3-euryale-70b": 0.4,
	"sao10k/l3-lunaris-8b": 0.03,
	"sao10k/l3.1-70b-hanami-x1": 1.5,
	"sao10k/l3.1-euryale-70b": 0.4,
	"sao10k/l3.3-euryale-70b": 0.4,
	"sophosympatheia/midnight-rose-70b": 0.4,
	"sophosympatheia/rogue-rose-103b-v0.2:free": 0.0,
	"teknium/openhermes-2.5-mistral-7b": 0.085,
	"thedrummer/rocinante-12b": 0.25,
	"thedrummer/unslopnemo-12b": 0.25,
	"undi95/remm-slerp-l2-13b": 0.6,
	"undi95/toppy-m-7b": 0.035,
	"undi95/toppy-m-7b:free": 0.0,
	"x-ai/grok-2-1212": 5.0,
	"x-ai/grok-2-vision-1212": 5.0,
	"x-ai/grok-beta": 7.5,
	"x-ai/grok-vision-beta": 7.5,
	"xwin-lm/xwin-lm-70b": 1.875,
}

var CompletionRatio = map[string]float64{

@@ -51,5 +51,7 @@ const (
	BaiduV2
	XunfeiV2
	AliBailian
	OpenAICompatible
	GeminiOpenAICompatible
	Dummy
)

@@ -51,6 +51,9 @@ var ChannelBaseURLs = []string{
	"https://qianfan.baidubce.com",              // 47
	"https://spark-api-open.xf-yun.com",         // 48
	"https://dashscope.aliyuncs.com",            // 49
	"",                                          // 50

	"https://generativelanguage.googleapis.com/v1beta/openai/", // 51
}

func init() {

@@ -3,13 +3,13 @@ import { useSelector } from 'react-redux';
import useRegister from 'hooks/useRegister';
import Turnstile from 'react-turnstile';
import { useSearchParams } from 'react-router-dom';
// import { useSelector } from 'react-redux';

// material-ui
import { useTheme } from '@mui/material/styles';
import {
  Box,
  Button,
  CircularProgress,
  FormControl,
  FormHelperText,
  Grid,

@@ -50,6 +50,9 @@ const RegisterForm = ({ ...others }) => {
  const [strength, setStrength] = useState(0);
  const [level, setLevel] = useState();

  const [timer, setTimer] = useState(0);
  const [loading, setLoading] = useState(false);

  const handleClickShowPassword = () => {
    setShowPassword(!showPassword);
  };

@@ -74,11 +77,17 @@ const RegisterForm = ({ ...others }) => {
      return;
    }

    setLoading(true); // Start loading

    const { success, message } = await sendVerificationCode(email, turnstileToken);
    setLoading(false); // Stop loading

    if (!success) {
      showError(message);
      return;
    }

    setTimer(60); // Start the 60-second timer
  };

  useEffect(() => {

@@ -94,6 +103,17 @@ const RegisterForm = ({ ...others }) => {
    }
  }, [siteInfo]);

  useEffect(() => {
    let interval;
    if (timer > 0) {
      interval = setInterval(() => {
        setTimer((prevTimer) => prevTimer - 1);
      }, 1000);
    }

    return () => clearInterval(interval);
  }, [timer]);

  return (
      <>
        <Formik

@@ -240,8 +260,13 @@ const RegisterForm = ({ ...others }) => {
                            onChange={handleChange}
                            endAdornment={
                              <InputAdornment position="end">
                        <Button variant="contained" color="primary" onClick={() => handleSendCode(values.email)}>
                          发送验证码
                                <Button
                                    variant="contained"
                                    color="primary"
                                    onClick={() => handleSendCode(values.email)}
                                    disabled={timer > 0 || loading}
                                >
                                  {loading ? <CircularProgress size={24} /> : timer > 0 ? `${timer}s` : '发送验证码'}
                                </Button>
                              </InputAdornment>
                            }


@@ -44,6 +44,9 @@ function renderType(type, t) {
function renderBalance(type, balance, t) {
  switch (type) {
    case 1: // OpenAI
        if (balance === 0) {
            return <span>{t('channel.table.balance_not_supported')}</span>;
        }
      return <span>${balance.toFixed(2)}</span>;
    case 4: // CloseAI
      return <span>¥{balance.toFixed(2)}</span>;
@@ -588,7 +591,15 @@ const ChannelsTable = () => {
                    />
                  </Table.Cell>
                  <Table.Cell>
                    <div>
                    <div
                      style={{
                        display: 'flex',
                        alignItems: 'center',
                        flexWrap: 'wrap',
                        gap: '2px',
                        rowGap: '6px',
                      }}
                    >
                      <Button
                        size={'tiny'}
                        positive

@@ -1,10 +1,24 @@
export const CHANNEL_OPTIONS = [
  { key: 1, text: 'OpenAI', value: 1, color: 'green' },
    {key: 14, text: 'Anthropic Claude', value: 14, color: 'black'},
  {
    key: 50,
    text: 'OpenAI 兼容',
    value: 50,
    color: 'olive',
    description: 'OpenAI 兼容渠道,支持设置 Base URL',
  },
  {key: 14, text: 'Anthropic', value: 14, color: 'black'},
  { key: 33, text: 'AWS', value: 33, color: 'black' },
    {key: 3, text: 'Azure OpenAI', value: 3, color: 'olive'},
    {key: 11, text: 'Google PaLM2', value: 11, color: 'orange'},
    {key: 24, text: 'Google Gemini', value: 24, color: 'orange'},
  {key: 3, text: 'Azure', value: 3, color: 'olive'},
  {key: 11, text: 'PaLM2', value: 11, color: 'orange'},
  {key: 24, text: 'Gemini', value: 24, color: 'orange'},
  {
    key: 51,
    text: 'Gemini (OpenAI)',
    value: 51,
    color: 'orange',
    description: 'Gemini OpenAI 兼容格式',
  },
  { key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
  { key: 41, text: 'Novita', value: 41, color: 'purple' },
  {
@@ -71,7 +85,14 @@ export const CHANNEL_OPTIONS = [
  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
  { key: 45, text: 'xAI', value: 45, color: 'blue' },
  { key: 46, text: 'Replicate', value: 46, color: 'blue' },
    {key: 8, text: '自定义渠道', value: 8, color: 'pink'},
  {
    key: 8,
    text: '自定义渠道',
    value: 8,
    color: 'pink',
    tip: '不推荐使用,请使用 <strong>OpenAI 兼容</strong>渠道类型。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型',
    description: '不推荐使用,请使用 OpenAI 兼容渠道类型',
  },
  { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
  { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
  { key: 20, text: 'OpenRouter', value: 20, color: 'black' },

@@ -16,7 +16,15 @@ export function renderGroup(group) {
  let groups = group.split(',');
  groups.sort();
  return (
    <>
    <div
      style={{
        display: 'flex',
        alignItems: 'center',
        flexWrap: 'wrap',
        gap: '2px',
        rowGap: '6px',
      }}
    >
      {groups.map((group) => {
        if (group === 'vip' || group === 'pro') {
          return <Label color='yellow'>{group}</Label>;
@@ -25,7 +33,7 @@ export function renderGroup(group) {
        }
        return <Label>{group}</Label>;
      })}
    </>
    </div>
  );
}

@@ -74,6 +74,7 @@ if (isMobile()) {
}

export function showError(error) {
  if (!error) return;
  console.error(error);
  if (error.message) {
    if (error.name === 'AxiosError') {
@@ -158,17 +159,7 @@ export function timestamp2string(timestamp) {
    second = '0' + second;
  }
  return (
    year +
    '-' +
    month +
    '-' +
    day +
    ' ' +
    hour +
    ':' +
    minute +
    ':' +
    second
      year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + second
  );
}

@@ -193,7 +184,6 @@ export const verifyJSON = (str) => {
export function shouldShowPrompt(id) {
  let prompt = localStorage.getItem(`prompt-${id}`);
  return !prompt;

}

export function setPromptShown(id) {

@@ -104,8 +104,10 @@
      "model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names",
      "system_prompt": "System Prompt",
      "system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model",
      "base_url": "Proxy",
      "base_url_placeholder": "Optional, used for API calls through proxy. Enter proxy address in format: https://domain.com",
      "proxy_url": "Proxy",
      "proxy_url_placeholder": "This is optional and used for API calls via a proxy. Please enter the proxy URL, formatted as: https://domain.com",
      "base_url": "Base URL",
      "base_url_placeholder": "The Base URL required by the OpenAPI SDK",
      "key": "Key",
      "key_placeholder": "Please enter key",
      "batch": "Batch Create",

@@ -104,8 +104,10 @@
      "model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称",
      "system_prompt": "系统提示词",
      "system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型",
      "base_url": "代理",
      "base_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com",
      "proxy_url": "代理",
      "proxy_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型",
      "base_url": "Base URL",
      "base_url_placeholder": "OpenAPI SDK 中所要求的 Base URL",
      "key": "密钥",
      "key_placeholder": "请输入密钥",
      "batch": "批量创建",
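
The reworded locale placeholders above draw a distinction between two fields: a proxy address only replaces the domain of the outgoing request, while the Base URL of the OpenAI-compatible channel type is used as-is, SDK-style, and may carry a path prefix. The following is a minimal sketch of that difference under assumed function names; it is not the project's actual request-building code.

import (
	"net/url"
	"strings"
)

// applyProxy swaps only the scheme and host of an already-built request URL,
// matching what the "proxy" placeholder describes.
func applyProxy(requestURL *url.URL, proxy string) error {
	p, err := url.Parse(proxy)
	if err != nil {
		return err
	}
	requestURL.Scheme = p.Scheme
	requestURL.Host = p.Host // only the domain part is replaced
	return nil
}

// joinBaseURL keeps the full base URL, including any path prefix,
// matching what the "Base URL" placeholder describes.
func joinBaseURL(baseURL, path string) string {
	return strings.TrimRight(baseURL, "/") + path
}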

@@ -1,6 +1,6 @@
import React, {useEffect, useState} from 'react';
import {useTranslation} from 'react-i18next';
import {Button, Card, Form, Input, Message,} from 'semantic-ui-react';
import {Button, Card, Form, Input, Message} from 'semantic-ui-react';
import {useNavigate, useParams} from 'react-router-dom';
import {API, copy, getChannelModels, showError, showInfo, showSuccess, verifyJSON,} from '../../helpers';
import {CHANNEL_OPTIONS} from '../../constants';
@@ -339,6 +339,20 @@ const EditChannel = () => {
            {inputs.type === 8 && (
              <Form.Field>
                <Form.Input
                    required
                    label={t('channel.edit.proxy_url')}
                    name='base_url'
                    placeholder={t('channel.edit.proxy_url_placeholder')}
                    onChange={handleInputChange}
                    value={inputs.base_url}
                    autoComplete='new-password'
                />
              </Form.Field>
            )}
            {inputs.type === 50 && (
                <Form.Field>
                  <Form.Input
                      required
                  label={t('channel.edit.base_url')}
                  name='base_url'
                  placeholder={t('channel.edit.base_url_placeholder')}
@@ -637,12 +651,13 @@ const EditChannel = () => {
            {inputs.type !== 3 &&
              inputs.type !== 33 &&
              inputs.type !== 8 &&
                inputs.type !== 50 &&
              inputs.type !== 22 && (
                <Form.Field>
                  <Form.Input
                    label={t('channel.edit.base_url')}
                      label={t('channel.edit.proxy_url')}
                    name='base_url'
                    placeholder={t('channel.edit.base_url_placeholder')}
                      placeholder={t('channel.edit.proxy_url_placeholder')}
                    onChange={handleInputChange}
                    value={inputs.base_url}
                    autoComplete='new-password'