Merge pull request #266 from Calcium-Ion/custom-channel

feat: changes to the custom channel feature
Calcium-Ion 2024-05-21 19:57:51 +08:00 committed by GitHub
commit f421699e1b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 100 additions and 96 deletions

View File

@@ -5,7 +5,7 @@ import (
"strings"
)
-//from songquanpeng/one-api
+// from songquanpeng/one-api
const (
USD2RMB = 7.3 // tentatively 1 USD = 7.3 RMB
USD = 500 // $0.002 = 1 -> $1 = 500
@@ -40,86 +40,86 @@ var DefaultModelRatio = map[string]float64{
"gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
"gpt-3.5-turbo": 0.25, // $0.0015 / 1K tokens
//"gpt-3.5-turbo-0301": 0.75, //deprecated
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
"gpt-3.5-turbo-0125": 0.25,
"babbage-002": 0.2, // $0.0004 / 1K tokens
"davinci-002": 1, // $0.002 / 1K tokens
"text-ada-001": 0.2,
"text-babbage-001": 0.25,
"text-curie-001": 1,
"text-davinci-002": 10,
"text-davinci-003": 10,
"text-davinci-edit-001": 10,
"code-davinci-edit-001": 10,
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
"tts-1": 7.5, // 1k characters -> $0.015
"tts-1-1106": 7.5, // 1k characters -> $0.015
"tts-1-hd": 15, // 1k characters -> $0.03
"tts-1-hd-1106": 15, // 1k characters -> $0.03
"davinci": 10,
"curie": 10,
"babbage": 10,
"ada": 10,
"text-embedding-3-small": 0.01,
"text-embedding-3-large": 0.065,
"text-embedding-ada-002": 0.05,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"claude-instant-1": 0.4, // $0.8 / 1M tokens
"claude-2.0": 4, // $8 / 1M tokens
"claude-2.1": 4, // $8 / 1M tokens
"claude-3-haiku-20240307": 0.125, // $0.25 / 1M tokens
"claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens //renamed to ERNIE-3.5-8K
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens //renamed to ERNIE-Lite-8K
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens //renamed to ERNIE-4.0-8K
"ERNIE-4.0-8K": 8.572, // ¥0.12 / 1k tokens
"ERNIE-3.5-8K": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Speed-8K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Speed-128K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Lite-8K": 0.2143, // ¥0.003 / 1k tokens
"ERNIE-Tiny-8K": 0.0715, // ¥0.001 / 1k tokens
"ERNIE-Character-8K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Functions-8K": 0.2858, // ¥0.004 / 1k tokens
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro-vision-001": 1,
"gemini-1.0-pro-001": 1,
"gemini-1.5-pro-latest": 1,
"gemini-1.5-flash-latest": 1,
"gemini-1.0-pro-latest": 1,
"gemini-1.0-pro-vision-latest": 1,
"gemini-ultra": 1,
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens
"chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
"glm-4": 7.143, // ¥0.1 / 1k tokens
"glm-4v": 7.143, // ¥0.1 / 1k tokens
"glm-3-turbo": 0.3572,
"qwen-turbo": 0.8572, // ¥0.012 / 1k tokens
"qwen-plus": 10, // ¥0.14 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"360gpt-turbo": 0.0858, // ¥0.0012 / 1k tokens
"360gpt-turbo-responsibility-8k": 0.8572, // ¥0.012 / 1k tokens
"360gpt-pro": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
// https://platform.lingyiwanwu.com/docs#-计费单元
// USD prices below are already converted at a rate of 7.2 RMB per USD
"yi-34b-chat-0205": 0.18,
@@ -143,10 +143,10 @@ var DefaultModelRatio = map[string]float64{
"deepseek-chat": 0.07,
"deepseek-coder": 0.07,
// Perplexity online models charge extra for search; adjust as needed, search fees are not counted here
"llama-3-sonar-small-32k-chat": 0.2 / 1000 * USD,
"llama-3-sonar-small-32k-online": 0.2 / 1000 * USD,
"llama-3-sonar-large-32k-chat": 1 / 1000 * USD,
"llama-3-sonar-large-32k-online": 1 / 1000 * USD,
}
var DefaultModelPrice = map[string]float64{
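
Side note on reading the ratios above: by the one-api convention quoted at the top of this file ($0.002 of usage = 1, USD = 500), a model ratio divided by USD gives the dollar price per 1K prompt tokens. A minimal Go sketch of that arithmetic; the helper name is made up for illustration and is not part of this commit:

package main

import "fmt"

// Quota convention from the constants above: $0.002 = 1 -> $1 = 500.
const USD = 500

// ratioToUSDPer1K is a hypothetical helper that converts a model ratio
// into the USD price per 1K prompt tokens.
func ratioToUSDPer1K(ratio float64) float64 {
	return ratio / USD
}

func main() {
	fmt.Println(ratioToUSDPer1K(5))     // gpt-4-turbo-2024-04-09: 0.01 ($0.01 / 1K tokens)
	fmt.Println(ratioToUSDPer1K(7.5))   // claude-3-opus-20240229: 0.015 ($15 / 1M tokens)
	fmt.Println(ratioToUSDPer1K(0.125)) // claude-3-haiku-20240307: 0.00025 ($0.25 / 1M tokens)
}
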
@@ -289,7 +289,7 @@ func GetCompletionRatio(name string) float64 {
}
return 4.0 / 3.0
}
-if strings.HasPrefix(name, "gpt-4") && name != "gpt-4-all" && name != "gpt-4-gizmo-*" {
+if strings.HasPrefix(name, "gpt-4") && !strings.HasSuffix(name, "-all") && !strings.HasSuffix(name, "-gizmo-*") {
if strings.HasPrefix(name, "gpt-4-turbo") || strings.HasSuffix(name, "preview") || strings.HasPrefix(name, "gpt-4o") {
return 3
}
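
The GetCompletionRatio tweak in this hunk widens the exclusion from the two exact names gpt-4-all and gpt-4-gizmo-* to any gpt-4* name ending in "-all" or "-gizmo-*", so variants such as gpt-4o-all no longer get the gpt-4 completion ratio. A standalone Go sketch of the new predicate (illustrative only, not the project's function):

package main

import (
	"fmt"
	"strings"
)

// usesGPT4CompletionRatio mirrors the rewritten condition above.
func usesGPT4CompletionRatio(name string) bool {
	return strings.HasPrefix(name, "gpt-4") &&
		!strings.HasSuffix(name, "-all") &&
		!strings.HasSuffix(name, "-gizmo-*")
}

func main() {
	fmt.Println(usesGPT4CompletionRatio("gpt-4-turbo-2024-04-09")) // true
	fmt.Println(usesGPT4CompletionRatio("gpt-4-all"))              // false, excluded before and after
	fmt.Println(usesGPT4CompletionRatio("gpt-4o-all"))             // false, newly excluded by the suffix check
}
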

View File

@@ -41,10 +41,10 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
case common.ChannelTypeMiniMax:
return minimax.GetRequestURL(info)
-//case common.ChannelTypeCustom:
-// url := info.BaseUrl
-// url = strings.Replace(url, "{model}", info.UpstreamModelName, -1)
-// return url, nil
+case common.ChannelTypeCustom:
+url := info.BaseUrl
+url = strings.Replace(url, "{model}", info.UpstreamModelName, -1)
+return url, nil
default:
return relaycommon.GetFullRequestURL(info.BaseUrl, info.RequestURLPath, info.ChannelType), nil
}
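
With the ChannelTypeCustom branch un-commented, a custom channel's Base URL is now used verbatim as the full upstream request URL, with every "{model}" placeholder replaced by the upstream model name and no relay path appended. A minimal Go sketch of that substitution; the URL and model name are invented for illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative values; in the adaptor they come from relaycommon.RelayInfo.
	baseUrl := "https://gateway.example.com/openai/{model}/chat/completions"
	upstreamModel := "gpt-4o"

	// Same substitution as the re-enabled branch: replace all "{model}"
	// occurrences and use the result directly as the request URL.
	url := strings.Replace(baseUrl, "{model}", upstreamModel, -1)
	fmt.Println(url) // https://gateway.example.com/openai/gpt-4o/chat/completions
}

This matches the EditChannel changes further down: the Base URL label for custom channels (type 8) now reads, in translation, "the full Base URL, with the {model} variable supported", and the placeholder asks for a complete URL such as https://api.openai.com/v1/chat/completions.
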

View File

@@ -301,17 +301,18 @@ const EditChannel = (props) => {
const addCustomModels = () => {
if (customModel.trim() === '') return;
// Split the string by commas, then trim the whitespace around each model name
-const modelArray = customModel.split(',').map(model => model.trim());
+const modelArray = customModel.split(',').map((model) => model.trim());
let localModels = [...inputs.models];
let localModelOptions = [...modelOptions];
let hasError = false;
-modelArray.forEach(model => {
+modelArray.forEach((model) => {
// Check that the model does not already exist and the name is not empty
if (model && !localModels.includes(model)) {
localModels.push(model); // add to the model list
-localModelOptions.push({ // add to the dropdown options
+localModelOptions.push({
+// add to the dropdown options
key: model,
text: model,
value: model,
@@ -330,7 +331,6 @@ const EditChannel = (props) => {
handleInputChange('models', localModels);
};
return (
<>
<SideSheet
@@ -433,11 +433,15 @@ const EditChannel = (props) => {
{inputs.type === 8 && (
<>
<div style={{ marginTop: 10 }}>
-<Typography.Text strong>Base URL</Typography.Text>
+<Typography.Text strong>
+完整的 Base URL,支持变量{'{model}'}
+</Typography.Text>
</div>
<Input
name='base_url'
-placeholder={'请输入自定义渠道的 Base URL'}
+placeholder={
+'请输入完整的URL,例如:https://api.openai.com/v1/chat/completions'
+}
onChange={(value) => {
handleInputChange('base_url', value);
}}