Mirror of https://github.com/linux-do/new-api.git (synced 2025-11-17 19:13:42 +08:00)

Compare commits: v0.2.6-alp ... v0.2.8-alp (25 commits)
| SHA1 |
|---|
| cc020d6a40 |
| daa1741aed |
| a25bcaa58f |
| f07ae8139b |
| d860289601 |
| cf8fe63fb6 |
| 1568d6481a |
| d05a786b4c |
| 01160658a5 |
| f421699e1b |
| f0c884cb55 |
| 51e0754ade |
| 1ab93717bb |
| d34b601dae |
| d6c1e3f37c |
| 774ce7195c |
| dbaa9390d3 |
| 84da88506f |
| 98a991306d |
| a3de309175 |
| de81eba90b |
| ea0c99ac1b |
| 095121673d |
| 039fda91f2 |
| e0df8bbbda |
@@ -229,6 +229,7 @@ const (
    ChannelTypeLingYiWanWu = 31
    ChannelTypeAws         = 33
    ChannelTypeCohere      = 34
    ChannelTypeMiniMax     = 35

    ChannelTypeDummy // this one is only for count, do not add any channel after this
)
@@ -269,4 +270,5 @@ var ChannelBaseURLs = []string{
    "",                         //32
    "",                         //33
    "https://api.cohere.ai",    //34
    "https://api.minimax.chat", //35
}
@@ -5,6 +5,13 @@ import (
    "strings"
)

// from songquanpeng/one-api
const (
    USD2RMB = 7.3 // tentatively 1 USD = 7.3 RMB
    USD     = 500 // $0.002 = 1 -> $1 = 500
    RMB     = USD / USD2RMB
)

// modelRatio
// https://platform.openai.com/docs/models/model-endpoint-compatibility
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
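A quick way to read these constants: one ratio unit corresponds to $0.002 per 1K tokens (per the `$0.002 = 1` comment), so a dollar price per 1K tokens converts to a ratio by multiplying by `USD = 500`. A minimal sketch of that conversion, assuming the constants above; the helper name is illustrative and not part of the repository:

```go
package main

import "fmt"

const (
    USD2RMB = 7.3
    USD     = 500 // $0.002 per 1K tokens == ratio 1, so $1 per 1K tokens == ratio 500
    RMB     = USD / USD2RMB
)

// usdPer1KToRatio converts a USD price per 1K tokens into a model ratio.
// Hypothetical helper, shown only to make the convention concrete.
func usdPer1KToRatio(pricePer1K float64) float64 {
    return pricePer1K * USD
}

func main() {
    // gpt-4o is listed at $0.005 / 1K tokens, which matches its ratio of 2.5.
    fmt.Println(usdPer1KToRatio(0.005)) // 2.5
}
```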
@@ -15,110 +22,119 @@ import (

var DefaultModelRatio = map[string]float64{
    //"midjourney": 50,
    "gpt-4-gizmo-*": 15,
    "g-*": 15,
    "gpt-4": 15,
    "gpt-4-0314": 15,
    "gpt-4-0613": 15,
    "gpt-4-32k": 30,
    "gpt-4-32k-0314": 30,
    "gpt-4-32k-0613": 30,
    "gpt-4o": 2.5, // $0.005 / 1K tokens
    "gpt-4o-2024-05-13": 2.5, // $0.005 / 1K tokens
    "gpt-4-turbo": 5, // $0.01 / 1K tokens
    "gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
    "gpt-4-1106-preview": 5, // $0.01 / 1K tokens
    "gpt-4-0125-preview": 5, // $0.01 / 1K tokens
    "gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
    "gpt-4-vision-preview": 5, // $0.01 / 1K tokens
    "gpt-4-1106-vision-preview": 5, // $0.01 / 1K tokens
    "gpt-3.5-turbo": 0.25, // $0.0005 / 1K tokens
    "gpt-3.5-turbo-0301": 0.75,
    "gpt-3.5-turbo-0613": 0.75,
    "gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
    "gpt-3.5-turbo-16k-0613": 1.5,
    "gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
    "gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
    "gpt-3.5-turbo-0125": 0.25,
    "babbage-002": 0.2, // $0.0004 / 1K tokens
    "davinci-002": 1, // $0.002 / 1K tokens
    "text-ada-001": 0.2,
    "text-babbage-001": 0.25,
    "text-curie-001": 1,
    "text-davinci-002": 10,
    "text-davinci-003": 10,
    "text-davinci-edit-001": 10,
    "code-davinci-edit-001": 10,
    "whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
    "tts-1": 7.5, // 1k characters -> $0.015
    "tts-1-1106": 7.5, // 1k characters -> $0.015
    "tts-1-hd": 15, // 1k characters -> $0.03
    "tts-1-hd-1106": 15, // 1k characters -> $0.03
    "davinci": 10,
    "curie": 10,
    "babbage": 10,
    "ada": 10,
    "text-embedding-3-small": 0.01,
    "text-embedding-3-large": 0.065,
    "text-embedding-ada-002": 0.05,
    "text-search-ada-doc-001": 10,
    "text-moderation-stable": 0.1,
    "text-moderation-latest": 0.1,
    "claude-instant-1": 0.4, // $0.8 / 1M tokens
    "claude-2.0": 4, // $8 / 1M tokens
    "claude-2.1": 4, // $8 / 1M tokens
    "claude-3-haiku-20240307": 0.125, // $0.25 / 1M tokens
    "claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
    "claude-3-opus-20240229": 7.5, // $15 / 1M tokens
    "ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens // renamed to ERNIE-3.5-8K
    "ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens // renamed to ERNIE-Lite-8K
    "ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens // renamed to ERNIE-4.0-8K
    "ERNIE-4.0-8K": 8.572, // ¥0.12 / 1k tokens
    "ERNIE-3.5-8K": 0.8572, // ¥0.012 / 1k tokens
    "ERNIE-Speed-8K": 0.2858, // ¥0.004 / 1k tokens
    "ERNIE-Speed-128K": 0.2858, // ¥0.004 / 1k tokens
    "ERNIE-Lite-8K": 0.2143, // ¥0.003 / 1k tokens
    "ERNIE-Tiny-8K": 0.0715, // ¥0.001 / 1k tokens
    "ERNIE-Character-8K": 0.2858, // ¥0.004 / 1k tokens
    "ERNIE-Functions-8K": 0.2858, // ¥0.004 / 1k tokens
    "Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
    "PaLM-2": 1,
    "gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
    "gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
    "gemini-1.0-pro-vision-001": 1,
    "gemini-1.0-pro-001": 1,
    "gemini-1.5-pro-latest": 1,
    "gemini-1.5-flash-latest": 1,
    "gemini-1.0-pro-latest": 1,
    "gemini-1.0-pro-vision-latest": 1,
    "gemini-ultra": 1,
    "chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
    "chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
    "chatglm_std": 0.3572, // ¥0.005 / 1k tokens
    "chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
    "glm-4": 7.143, // ¥0.1 / 1k tokens
    "glm-4v": 7.143, // ¥0.1 / 1k tokens
    "glm-3-turbo": 0.3572,
    "qwen-turbo": 0.8572, // ¥0.012 / 1k tokens
    "qwen-plus": 10, // ¥0.14 / 1k tokens
    "text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
    "SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
    "SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
    "SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
    "SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
    "360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
    "360gpt-turbo": 0.0858, // ¥0.0012 / 1k tokens
"gpt-4-gizmo-*": 15,
|
||||
"g-*": 15,
|
||||
"gpt-4": 15,
|
||||
"gpt-4-0314": 15,
|
||||
"gpt-4-0613": 15,
|
||||
"gpt-4-32k": 30,
|
||||
"gpt-4-32k-0314": 30,
|
||||
"gpt-4-32k-0613": 30,
|
||||
"gpt-4o": 2.5, // $0.005 / 1K tokens
|
||||
"gpt-4o-2024-05-13": 2.5, // $0.005 / 1K tokens
|
||||
"gpt-4-turbo": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-1106-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-0125-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-1106-vision-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-3.5-turbo": 0.25, // $0.0005 / 1K tokens
|
||||
"gpt-3.5-turbo-0301": 0.75,
|
||||
"gpt-3.5-turbo-0613": 0.75,
|
||||
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
|
||||
"gpt-3.5-turbo-16k-0613": 1.5,
|
||||
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
|
||||
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
|
||||
"gpt-3.5-turbo-0125": 0.25,
|
||||
"babbage-002": 0.2, // $0.0004 / 1K tokens
|
||||
"davinci-002": 1, // $0.002 / 1K tokens
|
||||
"text-ada-001": 0.2,
|
||||
"text-babbage-001": 0.25,
|
||||
"text-curie-001": 1,
|
||||
"text-davinci-002": 10,
|
||||
"text-davinci-003": 10,
|
||||
"text-davinci-edit-001": 10,
|
||||
"code-davinci-edit-001": 10,
|
||||
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
|
||||
"tts-1": 7.5, // 1k characters -> $0.015
|
||||
"tts-1-1106": 7.5, // 1k characters -> $0.015
|
||||
"tts-1-hd": 15, // 1k characters -> $0.03
|
||||
"tts-1-hd-1106": 15, // 1k characters -> $0.03
|
||||
"davinci": 10,
|
||||
"curie": 10,
|
||||
"babbage": 10,
|
||||
"ada": 10,
|
||||
"text-embedding-3-small": 0.01,
|
||||
"text-embedding-3-large": 0.065,
|
||||
"text-embedding-ada-002": 0.05,
|
||||
"text-search-ada-doc-001": 10,
|
||||
"text-moderation-stable": 0.1,
|
||||
"text-moderation-latest": 0.1,
|
||||
"claude-instant-1": 0.4, // $0.8 / 1M tokens
|
||||
"claude-2.0": 4, // $8 / 1M tokens
|
||||
"claude-2.1": 4, // $8 / 1M tokens
|
||||
"claude-3-haiku-20240307": 0.125, // $0.25 / 1M tokens
|
||||
"claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
|
||||
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
|
||||
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens //renamed to ERNIE-3.5-8K
|
||||
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens //renamed to ERNIE-Lite-8K
|
||||
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens //renamed to ERNIE-4.0-8K
|
||||
"ERNIE-4.0-8K": 8.572, // ¥0.12 / 1k tokens
|
||||
"ERNIE-3.5-8K": 0.8572, // ¥0.012 / 1k tokens
|
||||
"ERNIE-Speed-8K": 0.2858, // ¥0.004 / 1k tokens
|
||||
"ERNIE-Speed-128K": 0.2858, // ¥0.004 / 1k tokens
|
||||
"ERNIE-Lite-8K": 0.2143, // ¥0.003 / 1k tokens
|
||||
"ERNIE-Tiny-8K": 0.0715, // ¥0.001 / 1k tokens
|
||||
"ERNIE-Character-8K": 0.2858, // ¥0.004 / 1k tokens
|
||||
"ERNIE-Functions-8K": 0.2858, // ¥0.004 / 1k tokens
|
||||
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
|
||||
"PaLM-2": 1,
|
||||
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
|
||||
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
|
||||
"gemini-1.0-pro-vision-001": 1,
|
||||
"gemini-1.0-pro-001": 1,
|
||||
"gemini-1.5-pro-latest": 1,
|
||||
"gemini-1.5-flash-latest": 1,
|
||||
"gemini-1.0-pro-latest": 1,
|
||||
"gemini-1.0-pro-vision-latest": 1,
|
||||
"gemini-ultra": 1,
|
||||
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
|
||||
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
|
||||
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens
|
||||
"chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
|
||||
"glm-4": 7.143, // ¥0.1 / 1k tokens
|
||||
"glm-4v": 7.143, // ¥0.1 / 1k tokens
|
||||
"glm-3-turbo": 0.3572,
|
||||
"qwen-turbo": 0.8572, // ¥0.012 / 1k tokens
|
||||
"qwen-plus": 10, // ¥0.14 / 1k tokens
|
||||
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
|
||||
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
|
||||
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
|
||||
"360gpt-turbo": 0.0858, // ¥0.0012 / 1k tokens
|
||||
"360gpt-turbo-responsibility-8k": 0.8572, // ¥0.012 / 1k tokens
|
||||
"360gpt-pro": 0.8572, // ¥0.012 / 1k tokens
|
||||
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
|
||||
"360gpt-pro": 0.8572, // ¥0.012 / 1k tokens
|
||||
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
|
||||
    // https://platform.lingyiwanwu.com/docs#-计费单元
    // USD prices are already converted at an exchange rate of 7.2
    "yi-34b-chat-0205": 0.018,
    "yi-34b-chat-200k": 0.0864,
    "yi-vl-plus": 0.0432,
    "yi-34b-chat-0205": 0.18,
    "yi-34b-chat-200k": 0.864,
    "yi-vl-plus": 0.432,
    "yi-large": 20.0 / 1000 * RMB,
    "yi-medium": 2.5 / 1000 * RMB,
    "yi-vision": 6.0 / 1000 * RMB,
    "yi-medium-200k": 12.0 / 1000 * RMB,
    "yi-spark": 1.0 / 1000 * RMB,
    "yi-large-rag": 25.0 / 1000 * RMB,
    "yi-large-turbo": 12.0 / 1000 * RMB,
    "yi-large-preview": 20.0 / 1000 * RMB,
    "yi-large-rag-preview": 25.0 / 1000 * RMB,
    "command": 0.5,
    "command-nightly": 0.5,
    "command-light": 0.5,
@@ -127,6 +143,11 @@ var DefaultModelRatio = map[string]float64{
    "command-r-plus ": 1.5,
    "deepseek-chat": 0.07,
    "deepseek-coder": 0.07,
    // Perplexity online models charge extra for search; adjust as needed, search fees are not included here
    "llama-3-sonar-small-32k-chat": 0.2 / 1000 * USD,
    "llama-3-sonar-small-32k-online": 0.2 / 1000 * USD,
    "llama-3-sonar-large-32k-chat": 1 / 1000 * USD,
    "llama-3-sonar-large-32k-online": 1 / 1000 * USD,
}
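The RMB-denominated entries above encode a yuan price per 1M tokens directly through the `RMB` constant: under the constants defined earlier, `20.0 / 1000 * RMB` (the `yi-large` entry) works out to a ratio of about 1.37, i.e. roughly ¥0.02 per 1K tokens or ¥20 per 1M tokens. A small standalone check of that arithmetic, assuming only the constants shown above:

```go
package main

import "fmt"

const (
    USD2RMB = 7.3
    USD     = 500
    RMB     = USD / USD2RMB
)

func main() {
    ratio := 20.0 / 1000 * RMB // the "yi-large" entry
    fmt.Printf("ratio=%.2f, USD/1K=%.5f, RMB/1M=%.1f\n",
        ratio, ratio*0.002, ratio*0.002*USD2RMB*1000)
    // Output: ratio=1.37, USD/1K=0.00274, RMB/1M=20.0
}
```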

var DefaultModelPrice = map[string]float64{
@@ -11,6 +11,7 @@ import (
    "one-api/relay"
    "one-api/relay/channel/ai360"
    "one-api/relay/channel/lingyiwanwu"
    "one-api/relay/channel/minimax"
    "one-api/relay/channel/moonshot"
    relaycommon "one-api/relay/common"
    relayconstant "one-api/relay/constant"
@@ -79,7 +80,7 @@ func init() {
            Id:         modelName,
            Object:     "model",
            Created:    1626777600,
            OwnedBy:    "moonshot",
            OwnedBy:    moonshot.ChannelName,
            Permission: permission,
            Root:       modelName,
            Parent:     nil,
@@ -90,7 +91,18 @@ func init() {
            Id:         modelName,
            Object:     "model",
            Created:    1626777600,
            OwnedBy:    "lingyiwanwu",
            OwnedBy:    lingyiwanwu.ChannelName,
            Permission: permission,
            Root:       modelName,
            Parent:     nil,
        })
    }
    for _, modelName := range minimax.ModelList {
        openAIModels = append(openAIModels, dto.OpenAIModels{
            Id:         modelName,
            Object:     "model",
            Created:    1626777600,
            OwnedBy:    minimax.ChannelName,
            Permission: permission,
            Root:       modelName,
            Parent:     nil,
@@ -10,11 +10,11 @@ type GeneralOpenAIRequest struct {
    Model         string    `json:"model,omitempty"`
    Messages      []Message `json:"messages,omitempty"`
    Prompt        any       `json:"prompt,omitempty"`
    BestOf        int       `json:"best_of"`
    Echo          bool      `json:"echo"`
    BestOf        int       `json:"best_of,omitempty"`
    Echo          bool      `json:"echo,omitempty"`
    Stream        bool      `json:"stream,omitempty"`
    StreamOptions any       `json:"stream_options"`
    Suffix        string    `json:"suffix"`
    StreamOptions any       `json:"stream_options,omitempty"`
    Suffix        string    `json:"suffix,omitempty"`
    MaxTokens     uint      `json:"max_tokens,omitempty"`
    Temperature   float64   `json:"temperature,omitempty"`
    TopP          float64   `json:"top_p,omitempty"`
@@ -32,8 +32,8 @@ type GeneralOpenAIRequest struct {
    Tools       any    `json:"tools,omitempty"`
    ToolChoice  any    `json:"tool_choice,omitempty"`
    User        string `json:"user,omitempty"`
    LogitBias   any    `json:"logit_bias"`
    LogProbs    bool   `json:"logprobs,omitempty"`
    LogitBias   any    `json:"logit_bias,omitempty"`
    LogProbs    any    `json:"logprobs,omitempty"`
    TopLogProbs int    `json:"top_logprobs,omitempty"`
}
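The effect of adding `omitempty` here is that zero-value fields are dropped when the request struct is marshaled back to JSON, so fields the client never sent are not forwarded as `0`, `false`, `""`, or `null` (which some upstreams reject). A minimal sketch of that behavior, with the struct trimmed to a few illustrative fields rather than the full dto type:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down illustration of the before/after tags; not the real dto struct.
type withoutOmitempty struct {
    Model  string `json:"model,omitempty"`
    BestOf int    `json:"best_of"`
    Echo   bool   `json:"echo"`
}

type withOmitempty struct {
    Model  string `json:"model,omitempty"`
    BestOf int    `json:"best_of,omitempty"`
    Echo   bool   `json:"echo,omitempty"`
}

func main() {
    a, _ := json.Marshal(withoutOmitempty{Model: "gpt-4o"})
    b, _ := json.Marshal(withOmitempty{Model: "gpt-4o"})
    fmt.Println(string(a)) // {"model":"gpt-4o","best_of":0,"echo":false}
    fmt.Println(string(b)) // {"model":"gpt-4o"}
}
```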
@@ -1,9 +1,9 @@
package lingyiwanwu

// https://platform.lingyiwanwu.com/docs

var ModelList = []string{
    "yi-34b-chat-0205",
    "yi-34b-chat-200k",
    "yi-vl-plus",
}
package lingyiwanwu

// https://platform.lingyiwanwu.com/docs

var ModelList = []string{
    "yi-large", "yi-medium", "yi-vision", "yi-medium-200k", "yi-spark", "yi-large-rag", "yi-large-turbo", "yi-large-preview", "yi-large-rag-preview",
}

var ChannelName = "lingyiwanwu"
relay/channel/minimax/constants.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package minimax

// https://www.minimaxi.com/document/guides/chat-model/V2?id=65e0736ab2845de20908e2dd

var ModelList = []string{
    "abab6.5-chat",
    "abab6.5s-chat",
    "abab6-chat",
    "abab5.5-chat",
    "abab5.5s-chat",
}

var ChannelName = "minimax"
relay/channel/minimax/relay-minimax.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package minimax

import (
    "fmt"
    relaycommon "one-api/relay/common"
)

func GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
    return fmt.Sprintf("%s/v1/text/chatcompletion_v2", info.BaseUrl), nil
}
@@ -5,3 +5,5 @@ var ModelList = []string{
    "moonshot-v1-32k",
    "moonshot-v1-128k",
}

var ChannelName = "moonshot"
@@ -11,6 +11,7 @@ import (
    "one-api/relay/channel"
    "one-api/relay/channel/ai360"
    "one-api/relay/channel/lingyiwanwu"
    "one-api/relay/channel/minimax"
    "one-api/relay/channel/moonshot"
    relaycommon "one-api/relay/common"
    "one-api/service"
@@ -26,7 +27,8 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIReq
}

func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
    if info.ChannelType == common.ChannelTypeAzure {
    switch info.ChannelType {
    case common.ChannelTypeAzure:
        // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
        requestURL := strings.Split(info.RequestURLPath, "?")[0]
        requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, info.ApiVersion)
@@ -37,8 +39,15 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {

        requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
        return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
    case common.ChannelTypeMiniMax:
        return minimax.GetRequestURL(info)
    case common.ChannelTypeCustom:
        url := info.BaseUrl
        url = strings.Replace(url, "{model}", info.UpstreamModelName, -1)
        return url, nil
    default:
        return relaycommon.GetFullRequestURL(info.BaseUrl, info.RequestURLPath, info.ChannelType), nil
    }
    return relaycommon.GetFullRequestURL(info.BaseUrl, info.RequestURLPath, info.ChannelType), nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.RelayInfo) error {
@@ -90,11 +99,24 @@ func (a *Adaptor) GetModelList() []string {
        return moonshot.ModelList
    case common.ChannelTypeLingYiWanWu:
        return lingyiwanwu.ModelList
    case common.ChannelTypeMiniMax:
        return minimax.ModelList
    default:
        return ModelList
    }
}

func (a *Adaptor) GetChannelName() string {
    return ChannelName
    switch a.ChannelType {
    case common.ChannelType360:
        return ai360.ChannelName
    case common.ChannelTypeMoonshot:
        return moonshot.ChannelName
    case common.ChannelTypeLingYiWanWu:
        return lingyiwanwu.ChannelName
    case common.ChannelTypeMiniMax:
        return minimax.ChannelName
    default:
        return ChannelName
    }
}
@@ -1,7 +1,7 @@
package perplexity

var ModelList = []string{
    "sonar-small-chat", "sonar-small-online", "sonar-medium-chat", "sonar-medium-online", "mistral-7b-instruct", "mixtral-8x7b-instruct",
    "llama-3-sonar-small-32k-chat", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-large-32k-online", "llama-3-8b-instruct", "llama-3-70b-instruct", "mixtral-8x7b-instruct",
}

var ChannelName = "perplexity"
@@ -191,7 +191,15 @@ func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.Re
    case relayconstant.RelayModeChatCompletions:
        promptTokens, err, sensitiveTrigger = service.CountTokenChatRequest(*textRequest, textRequest.Model, checkSensitive)
    case relayconstant.RelayModeCompletions:
        promptTokens, err, sensitiveTrigger = service.CountTokenInput(textRequest.Prompt, textRequest.Model, checkSensitive)
        prompts := textRequest.Prompt
        switch v := prompts.(type) {
        case string:
            prompts = v + textRequest.Suffix
        case []string:
            prompts = append(v, textRequest.Suffix)
        }

        promptTokens, err, sensitiveTrigger = service.CountTokenInput(prompts, textRequest.Model, checkSensitive)
    case relayconstant.RelayModeModerations:
        promptTokens, err, sensitiveTrigger = service.CountTokenInput(textRequest.Input, textRequest.Model, checkSensitive)
    case relayconstant.RelayModeEmbeddings:
@@ -69,7 +69,11 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
    return len(tokenEncoder.Encode(text, nil, nil))
}

func getImageToken(imageUrl *dto.MessageImageUrl) (int, error) {
func getImageToken(imageUrl *dto.MessageImageUrl, model string, stream bool) (int, error) {
    // TODO: do not count image tokens in non-stream mode
    if model == "glm-4v" {
        return 1047, nil
    }
    if imageUrl.Detail == "low" {
        return 85, nil
    }
@@ -125,7 +129,7 @@ func getImageToken(imageUrl *dto.MessageImageUrl) (int, error) {

func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string, checkSensitive bool) (int, error, bool) {
    tkm := 0
    msgTokens, err, b := CountTokenMessages(request.Messages, model, checkSensitive)
    msgTokens, err, b := CountTokenMessages(request.Messages, model, request.Stream, checkSensitive)
    if err != nil {
        return 0, err, b
    }
@@ -158,7 +162,7 @@ func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string, check
    return tkm, nil, false
}

func CountTokenMessages(messages []dto.Message, model string, checkSensitive bool) (int, error, bool) {
func CountTokenMessages(messages []dto.Message, model string, stream bool, checkSensitive bool) (int, error, bool) {
    //recover when panic
    tokenEncoder := getTokenEncoder(model)
    // Reference:
@@ -195,19 +199,13 @@ func CountTokenMessages(messages []dto.Message, model string, checkSensitive boo
                tokenNum += getTokenNum(tokenEncoder, *message.Name)
            }
        } else {
            var err error
            arrayContent := message.ParseContent()
            for _, m := range arrayContent {
                if m.Type == "image_url" {
                    var imageTokenNum int
                    if model == "glm-4v" {
                        imageTokenNum = 1047
                    } else {
                        imageUrl := m.ImageUrl.(dto.MessageImageUrl)
                        imageTokenNum, err = getImageToken(&imageUrl)
                        if err != nil {
                            return 0, err, false
                        }
                    imageUrl := m.ImageUrl.(dto.MessageImageUrl)
                    imageTokenNum, err := getImageToken(&imageUrl, model, stream)
                    if err != nil {
                        return 0, err, false
                    }
                    tokenNum += imageTokenNum
                    log.Printf("image token num: %d", imageTokenNum)
@@ -310,12 +310,12 @@ const ChannelsTable = () => {

  const setChannelFormat = (channels) => {
    for (let i = 0; i < channels.length; i++) {
      if (channels[i].type === 8) {
        showWarning(
          '检测到您使用了“自定义渠道”类型,请更换为“OpenAI”渠道类型!',
        );
        showWarning('下个版本将不再支持“自定义渠道”类型!');
      }
      // if (channels[i].type === 8) {
      //   showWarning(
      //     '检测到您使用了“自定义渠道”类型,请更换为“OpenAI”渠道类型!',
      //   );
      //   showWarning('下个版本将不再支持“自定义渠道”类型!');
      // }
      channels[i].key = '' + channels[i].id;
      let test_models = [];
      channels[i].models.split(',').forEach((item, index) => {
@@ -302,6 +302,9 @@ const LogsTable = () => {
        let content = '渠道:' + record.channel;
        if (record.other !== '') {
          let other = JSON.parse(record.other);
          if (other === null) {
            return <></>;
          }
          if (other.admin_info !== undefined) {
            if (
              other.admin_info.use_channel !== null &&
@@ -323,6 +326,10 @@ const LogsTable = () => {
      dataIndex: 'content',
      render: (text, record, index) => {
        if (record.other === '') {
          record.other = '{}';
        }
        let other = JSON.parse(record.other);
        if (other == null) {
          return (
            <Paragraph
              ellipsis={{
@@ -338,7 +345,6 @@ const LogsTable = () => {
            </Paragraph>
          );
        }
        let other = JSON.parse(record.other);
        let content = renderModelPrice(
          record.prompt_tokens,
          record.completion_tokens,
@@ -146,11 +146,12 @@ const ModelPricing = () => {
      render: (text, record, index) => {
        let content = text;
        if (record.quota_type === 0) {
          let inputRatioPrice = record.model_ratio * 2.0 * record.group_ratio;
          // the *2 is because a ratio of 1 equals $0.002; do not remove
          let inputRatioPrice = record.model_ratio * 2 * record.group_ratio;
          let completionRatioPrice =
            record.model_ratio *
            record.completion_ratio *
            2.0 *
            2 *
            record.group_ratio;
          content = (
            <>
@@ -36,13 +36,6 @@ export const CHANNEL_OPTIONS = [
    color: 'teal',
    label: 'Azure OpenAI',
  },
  {
    key: 11,
    text: 'Google PaLM2',
    value: 11,
    color: 'orange',
    label: 'Google PaLM2',
  },
  {
    key: 24,
    text: 'Google Gemini',
@@ -92,10 +85,18 @@ export const CHANNEL_OPTIONS = [
    color: 'purple',
    label: '智谱 GLM-4V',
  },
  {
    key: 11,
    text: 'Google PaLM2',
    value: 11,
    color: 'orange',
    label: 'Google PaLM2',
  },
  { key: 25, text: 'Moonshot', value: 25, color: 'green', label: 'Moonshot' },
  { key: 19, text: '360 智脑', value: 19, color: 'blue', label: '360 智脑' },
  { key: 23, text: '腾讯混元', value: 23, color: 'teal', label: '腾讯混元' },
  { key: 31, text: '零一万物', value: 31, color: 'green', label: '零一万物' },
  { key: 35, text: 'MiniMax', value: 35, color: 'green', label: 'MiniMax' },
  { key: 8, text: '自定义渠道', value: 8, color: 'pink', label: '自定义渠道' },
  {
    key: 22,
@@ -149,8 +149,9 @@ export function renderModelPrice(
  if (completionRatio === undefined) {
    completionRatio = 0;
  }
  // the *2 is because a ratio of 1 equals $0.002; do not remove
  let inputRatioPrice = modelRatio * 2.0 * groupRatio;
  let completionRatioPrice = modelRatio * completionRatio * 2.0 * groupRatio;
  let completionRatioPrice = modelRatio * 2.0 * completionRatio * groupRatio;
  let price =
    (inputTokens / 1000000) * inputRatioPrice +
    (completionTokens / 1000000) * completionRatioPrice;
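The `* 2.0` factor here is the same convention as the backend constants: a ratio of 1 equals $0.002 per 1K tokens, i.e. $2 per 1M tokens, so multiplying a ratio by 2 yields a dollar price per million tokens. A worked example of the arithmetic in Go (the frontend code itself is JavaScript); the modelRatio of 2.5 matches the gpt-4o entry above, while the completion ratio of 3 and the token counts are illustrative assumptions:

```go
package main

import "fmt"

func main() {
    // Same arithmetic as renderModelPrice, with assumed example values.
    modelRatio, completionRatio, groupRatio := 2.5, 3.0, 1.0
    inputRatioPrice := modelRatio * 2.0 * groupRatio                         // $ per 1M prompt tokens (5)
    completionRatioPrice := modelRatio * 2.0 * completionRatio * groupRatio // $ per 1M completion tokens (15)

    inputTokens, completionTokens := 1000.0, 500.0
    price := inputTokens/1000000*inputRatioPrice + completionTokens/1000000*completionRatioPrice
    fmt.Printf("$%.4f\n", price) // $0.0125
}
```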
@@ -433,11 +433,15 @@ const EditChannel = (props) => {
          {inputs.type === 8 && (
            <>
              <div style={{ marginTop: 10 }}>
                <Typography.Text strong>Base URL:</Typography.Text>
                <Typography.Text strong>
                  完整的 Base URL,支持变量{'{model}'}:
                </Typography.Text>
              </div>
              <Input
                name='base_url'
                placeholder={'请输入自定义渠道的 Base URL'}
                placeholder={
                  '请输入完整的URL,例如:https://api.openai.com/v1/chat/completions'
                }
                onChange={(value) => {
                  handleInputChange('base_url', value);
                }}
@@ -135,7 +135,7 @@ export default function SettingsMagnification(props) {
        <Row gutter={16}>
          <Col span={16}>
            <Form.TextArea
              label={'模型补全倍率'}
              label={'模型补全倍率(仅对自定义模型有效)'}
              extraText={'仅对自定义模型有效'}
              placeholder={'为一个 JSON 文本,键为模型名称,值为倍率'}
              field={'CompletionRatio'}