Compare commits

...

18 Commits

Author SHA1 Message Date
Laisky.Cai
dcb77aadf8 Merge 6e634b85cf into 8df4a2670b 2025-03-12 00:34:26 +00:00
Laisky.Cai
6e634b85cf fix: update StreamHandler to support cross-region model IDs for AWS 2025-03-12 00:34:15 +00:00
Laisky.Cai
a0d7d5a965 fix: support thinking for aws claude 2025-03-10 07:00:45 +00:00
Laisky.Cai
de10e102bd feat: add support for aws's cross region inferences
closes #2024, closes #2145
2025-03-10 06:43:40 +00:00
Laisky.Cai
c61d6440f9 fix: claude thinking for non-stream mode 2025-02-25 03:14:18 +00:00
Laisky.Cai
3a8924d7af feat: add support for extended reasoning in Claude 3.7 model 2025-02-25 03:02:51 +00:00
Laisky.Cai
95527d76ef feat: update model list and pricing for Claude 3.7 versions 2025-02-25 03:02:24 +00:00
JustSong
8df4a2670b docs: update ByteDance Doubao model link in README 2025-02-21 19:30:16 +08:00
Laisky.Cai
7ec33793b7 feat: add OpenrouterProviderSort configuration for provider sorting 2025-02-20 01:51:45 +00:00
Laisky.Cai
1a6812182b fix: improve reasoning token counting in OpenAI adaptor 2025-02-19 09:13:24 +00:00
Laisky.Cai
5ba60433d7 feat: enhance reasoning token handling in OpenAI adaptor 2025-02-19 08:10:19 +00:00
Laisky.Cai
480f248a3d feat: support OpenRouter reasoning 2025-02-19 01:20:14 +00:00
longkeyy
7ac553541b feat: update openrouter models and price 20250213 (#2084) 2025-02-16 18:01:59 +08:00
longkeyy
a5c517c27a feat: update ali models and price 20250213 (#2086) 2025-02-16 18:01:24 +08:00
JustSong
3f421c4f04 feat: support Gemini openai compatible api 2025-02-16 17:59:39 +08:00
JustSong
1ce6a226f6 chore: update prompt 2025-02-16 17:42:20 +08:00
JustSong
cafd0a0327 feat: add OpenAI compatible channel (close #2091) 2025-02-16 17:38:06 +08:00
JustSong
8b8cd03e85 feat: add balance not supported message in ChannelsTable 2025-02-12 01:20:28 +08:00
36 changed files with 1068 additions and 228 deletions

View File

@@ -44,4 +44,4 @@ COPY --from=builder2 /build/one-api /
EXPOSE 3000
WORKDIR /data
ENTRYPOINT ["/one-api"]

View File

@@ -72,7 +72,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
+ [x] [Anthropic Claude 系列模型](https://anthropic.com) (支持 AWS Claude)
+ [x] [Google PaLM2/Gemini 系列模型](https://developers.generativeai.google)
+ [x] [Mistral 系列模型](https://mistral.ai/)
-+ [x] [字节跳动豆包大模型](https://console.volcengine.com/ark/region:ark+cn-beijing/model)
++ [x] [字节跳动豆包大模型(火山引擎)](https://www.volcengine.com/experience/ark?utm_term=202502dsinvite&ac=DSASUQY5&rc=2QXCA1VI)
+ [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
+ [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
+ [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
@@ -385,7 +385,7 @@ graph LR
+ 例子:`NODE_TYPE=slave`
9. `CHANNEL_UPDATE_FREQUENCY`:设置之后将定期更新渠道余额,单位为分钟,未设置则不进行更新。
+ 例子:`CHANNEL_UPDATE_FREQUENCY=1440`
10. `CHANNEL_TEST_FREQUENCY`:设置之后将定期检查渠道,单位为分钟,未设置则不进行检查。
+例子:`CHANNEL_TEST_FREQUENCY=1440`
11. `POLLING_INTERVAL`:批量更新渠道余额以及测试可用性时的请求间隔,单位为秒,默认无间隔。
+ 例子:`POLLING_INTERVAL=5`

View File

@@ -164,3 +164,6 @@ var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)
var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false)
var TestPrompt = env.String("TEST_PROMPT", "Output only your specific model name with no additional text.")
+
+// OpenrouterProviderSort is used to determine the order of the providers in the openrouter
+var OpenrouterProviderSort = env.String("OPENROUTER_PROVIDER_SORT", "")
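A sketch of how this new option is consumed (it mirrors the relay/adaptor/openai/adaptor.go change later in this compare; exact condition ordering may differ): when the client request leaves provider.sort empty, the configured value is copied in.

    // illustrative only, not part of this diff
    if config.OpenrouterProviderSort != "" && (request.Provider == nil || request.Provider.Sort == "") {
        if request.Provider == nil {
            request.Provider = &openrouter.RequestProvider{}
        }
        request.Provider.Sort = config.OpenrouterProviderSort // e.g. "price", "throughput" or "latency"
    }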

View File

@@ -1,6 +1,9 @@
package conv

func AsString(v any) string {
-	str, _ := v.(string)
-	return str
+	if str, ok := v.(string); ok {
+		return str
+	}
+	return ""
}
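The refactor keeps the previous behavior (non-string values still come back as the empty string) but makes the fallback explicit. Illustrative calls, not part of the diff:

    conv.AsString("hello") // "hello"
    conv.AsString(42)      // ""
    conv.AsString(nil)     // ""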

View File

@@ -14,10 +14,14 @@ var ModelList = []string{
	"qwen2-72b-instruct", "qwen2-57b-a14b-instruct", "qwen2-7b-instruct", "qwen2-1.5b-instruct", "qwen2-0.5b-instruct",
	"qwen1.5-110b-chat", "qwen1.5-72b-chat", "qwen1.5-32b-chat", "qwen1.5-14b-chat", "qwen1.5-7b-chat", "qwen1.5-1.8b-chat", "qwen1.5-0.5b-chat",
	"qwen-72b-chat", "qwen-14b-chat", "qwen-7b-chat", "qwen-1.8b-chat", "qwen-1.8b-longcontext-chat",
+	"qvq-72b-preview",
+	"qwen2.5-vl-72b-instruct", "qwen2.5-vl-7b-instruct", "qwen2.5-vl-2b-instruct", "qwen2.5-vl-1b-instruct", "qwen2.5-vl-0.5b-instruct",
	"qwen2-vl-7b-instruct", "qwen2-vl-2b-instruct", "qwen-vl-v1", "qwen-vl-chat-v1",
	"qwen2-audio-instruct", "qwen-audio-chat",
	"qwen2.5-math-72b-instruct", "qwen2.5-math-7b-instruct", "qwen2.5-math-1.5b-instruct", "qwen2-math-72b-instruct", "qwen2-math-7b-instruct", "qwen2-math-1.5b-instruct",
	"qwen2.5-coder-32b-instruct", "qwen2.5-coder-14b-instruct", "qwen2.5-coder-7b-instruct", "qwen2.5-coder-3b-instruct", "qwen2.5-coder-1.5b-instruct", "qwen2.5-coder-0.5b-instruct",
	"text-embedding-v1", "text-embedding-v3", "text-embedding-v2", "text-embedding-async-v2", "text-embedding-async-v1",
	"ali-stable-diffusion-xl", "ali-stable-diffusion-v1.5", "wanx-v1",
+	"qwen-mt-plus", "qwen-mt-turbo",
+	"deepseek-r1", "deepseek-v3", "deepseek-r1-distill-qwen-1.5b", "deepseek-r1-distill-qwen-7b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b", "deepseek-r1-distill-llama-8b", "deepseek-r1-distill-llama-70b",
}

View File

@@ -36,8 +36,8 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
	// https://x.com/alexalbert__/status/1812921642143900036
	// claude-3-5-sonnet can support 8k context
-	if strings.HasPrefix(meta.ActualModelName, "claude-3-5-sonnet") {
-		req.Header.Set("anthropic-beta", "max-tokens-3-5-sonnet-2024-07-15")
+	if strings.HasPrefix(meta.ActualModelName, "claude-3-7-sonnet") {
+		req.Header.Set("anthropic-beta", "output-128k-2025-02-19")
	}

	return nil
@@ -47,7 +47,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	if request == nil {
		return nil, errors.New("request is nil")
	}
-	return ConvertRequest(*request), nil
+	return ConvertRequest(c, *request)
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
View File

@@ -3,11 +3,13 @@ package anthropic
var ModelList = []string{
	"claude-instant-1.2", "claude-2.0", "claude-2.1",
	"claude-3-haiku-20240307",
-	"claude-3-5-haiku-20241022",
	"claude-3-5-haiku-latest",
+	"claude-3-5-haiku-20241022",
	"claude-3-sonnet-20240229",
	"claude-3-opus-20240229",
+	"claude-3-5-sonnet-latest",
	"claude-3-5-sonnet-20240620",
	"claude-3-5-sonnet-20241022",
-	"claude-3-5-sonnet-latest",
+	"claude-3-7-sonnet-latest",
+	"claude-3-7-sonnet-20250219",
}

View File

@@ -2,18 +2,21 @@ package anthropic
import (
	"bufio"
+	"context"
	"encoding/json"
	"fmt"
-	"github.com/songquanpeng/one-api/common/render"
	"io"
+	"math"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/image"
	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/render"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	"github.com/songquanpeng/one-api/relay/model"
)
@@ -36,7 +39,16 @@ func stopReasonClaude2OpenAI(reason *string) string {
	}
}

-func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
+// isModelSupportThinking is used to check if the model supports extended thinking
+func isModelSupportThinking(model string) bool {
+	if strings.Contains(model, "claude-3-7-sonnet") {
+		return true
+	}
+	return false
+}
+
+func ConvertRequest(c *gin.Context, textRequest model.GeneralOpenAIRequest) (*Request, error) {
	claudeTools := make([]Tool, 0, len(textRequest.Tools))

	for _, tool := range textRequest.Tools {
@@ -61,7 +73,27 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
		TopK:     textRequest.TopK,
		Stream:   textRequest.Stream,
		Tools:    claudeTools,
+		Thinking: textRequest.Thinking,
	}
+
+	if isModelSupportThinking(textRequest.Model) &&
+		c.Request.URL.Query().Has("thinking") && claudeRequest.Thinking == nil {
+		claudeRequest.Thinking = &model.Thinking{
+			Type:         "enabled",
+			BudgetTokens: int(math.Min(1024, float64(claudeRequest.MaxTokens/2))),
+		}
+	}
+
+	if isModelSupportThinking(textRequest.Model) &&
+		claudeRequest.Thinking != nil {
+		if claudeRequest.MaxTokens <= 1024 {
+			return nil, errors.New("max_tokens must be greater than 1024 when using extended thinking")
+		}
+
+		// top_p must be nil when using extended thinking
+		claudeRequest.TopP = nil
+	}
+
	if len(claudeTools) > 0 {
		claudeToolChoice := struct {
			Type string `json:"type"`
@@ -142,13 +174,14 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
		claudeMessage.Content = contents
		claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
	}
-	return &claudeRequest
+	return &claudeRequest, nil
}

// https://docs.anthropic.com/claude/reference/messages-streaming
func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *Response) {
	var response *Response
	var responseText string
+	var reasoningText string
	var stopReason string
	tools := make([]model.Tool, 0)
@@ -158,6 +191,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
	case "content_block_start":
		if claudeResponse.ContentBlock != nil {
			responseText = claudeResponse.ContentBlock.Text
+			if claudeResponse.ContentBlock.Thinking != nil {
+				reasoningText = *claudeResponse.ContentBlock.Thinking
+			}
+
			if claudeResponse.ContentBlock.Type == "tool_use" {
				tools = append(tools, model.Tool{
					Id: claudeResponse.ContentBlock.Id,
@@ -172,6 +209,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
	case "content_block_delta":
		if claudeResponse.Delta != nil {
			responseText = claudeResponse.Delta.Text
+			if claudeResponse.Delta.Thinking != nil {
+				reasoningText = *claudeResponse.Delta.Thinking
+			}
+
			if claudeResponse.Delta.Type == "input_json_delta" {
				tools = append(tools, model.Tool{
					Function: model.Function{
@@ -189,9 +230,20 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
		if claudeResponse.Delta != nil && claudeResponse.Delta.StopReason != nil {
			stopReason = *claudeResponse.Delta.StopReason
		}
+	case "thinking_delta":
+		if claudeResponse.Delta != nil && claudeResponse.Delta.Thinking != nil {
+			reasoningText = *claudeResponse.Delta.Thinking
+		}
+	case "ping",
+		"message_stop",
+		"content_block_stop":
+	default:
+		logger.SysErrorf("unknown stream response type %q", claudeResponse.Type)
	}

	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Content = responseText
+	choice.Delta.Reasoning = &reasoningText
	if len(tools) > 0 {
		choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
		choice.Delta.ToolCalls = tools
@@ -209,11 +261,23 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
	var responseText string
-	if len(claudeResponse.Content) > 0 {
-		responseText = claudeResponse.Content[0].Text
-	}
+	var reasoningText string
+
	tools := make([]model.Tool, 0)
	for _, v := range claudeResponse.Content {
+		switch v.Type {
+		case "thinking":
+			if v.Thinking != nil {
+				reasoningText += *v.Thinking
+			} else {
+				logger.Errorf(context.Background(), "thinking is nil in response")
+			}
+		case "text":
+			responseText += v.Text
+		default:
+			logger.Warnf(context.Background(), "unknown response type %q", v.Type)
+		}
+
		if v.Type == "tool_use" {
			args, _ := json.Marshal(v.Input)
			tools = append(tools, model.Tool{
@@ -226,11 +290,13 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
			})
		}
	}

	choice := openai.TextResponseChoice{
		Index: 0,
		Message: model.Message{
			Role:      "assistant",
			Content:   responseText,
+			Reasoning: &reasoningText,
			Name:      nil,
			ToolCalls: tools,
		},
@@ -277,6 +343,8 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
		data = strings.TrimPrefix(data, "data:")
		data = strings.TrimSpace(data)

+		logger.Debugf(c.Request.Context(), "stream <- %q\n", data)
+
		var claudeResponse StreamResponse
		err := json.Unmarshal([]byte(data), &claudeResponse)
		if err != nil {
@@ -344,6 +412,9 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
	if err != nil {
		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
+
+	logger.Debugf(c.Request.Context(), "response <- %s\n", string(responseBody))
+
	var claudeResponse Response
	err = json.Unmarshal(responseBody, &claudeResponse)
	if err != nil {
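Taken together, the ConvertRequest changes let extended thinking be requested for claude-3-7-sonnet models either by sending an Anthropic-style thinking object in the body or by appending a thinking query parameter, in which case a default budget of min(1024, max_tokens/2) is filled in. A hypothetical conversion outcome (values are examples, not from the diff):

    // client sends POST .../v1/chat/completions?thinking with max_tokens = 2048
    req := model.GeneralOpenAIRequest{Model: "claude-3-7-sonnet-latest", MaxTokens: 2048}
    // ConvertRequest then sets:
    //   claudeRequest.Thinking = &model.Thinking{Type: "enabled", BudgetTokens: 1024} // min(1024, 2048/2)
    //   claudeRequest.TopP = nil                  // top_p is not allowed with extended thinking
    // and requests with MaxTokens <= 1024 are rejected; thinking deltas come back
    // on the OpenAI-style choice.Delta.Reasoning field.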

View File

@@ -1,5 +1,7 @@
package anthropic

+import "github.com/songquanpeng/one-api/relay/model"
+
// https://docs.anthropic.com/claude/reference/messages_post

type Metadata struct {
@@ -22,6 +24,9 @@ type Content struct {
	Input     any    `json:"input,omitempty"`
	Content   string `json:"content,omitempty"`
	ToolUseId string `json:"tool_use_id,omitempty"`
+	// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking
+	Thinking  *string `json:"thinking,omitempty"`
+	Signature *string `json:"signature,omitempty"`
}

type Message struct {
@@ -54,6 +59,7 @@ type Request struct {
	Tools      []Tool `json:"tools,omitempty"`
	ToolChoice any    `json:"tool_choice,omitempty"`
	//Metadata    `json:"metadata,omitempty"`
+	Thinking   *model.Thinking `json:"thinking,omitempty"`
}

type Usage struct {
@@ -84,6 +90,8 @@ type Delta struct {
	PartialJson  string  `json:"partial_json,omitempty"`
	StopReason   *string `json:"stop_reason"`
	StopSequence *string `json:"stop_sequence"`
+	Thinking     *string `json:"thinking,omitempty"`
+	Signature    *string `json:"signature,omitempty"`
}

type StreamResponse struct {

View File

@@ -21,7 +21,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
		return nil, errors.New("request is nil")
	}

-	claudeReq := anthropic.ConvertRequest(*request)
+	claudeReq, err := anthropic.ConvertRequest(c, *request)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert request")
+	}
+
	c.Set(ctxkey.RequestModel, request.Model)
	c.Set(ctxkey.ConvertedRequest, claudeReq)
	return claudeReq, nil

View File

@@ -36,6 +36,8 @@ var AwsModelIDMap = map[string]string{
	"claude-3-5-sonnet-20241022": "anthropic.claude-3-5-sonnet-20241022-v2:0",
	"claude-3-5-sonnet-latest":   "anthropic.claude-3-5-sonnet-20241022-v2:0",
	"claude-3-5-haiku-20241022":  "anthropic.claude-3-5-haiku-20241022-v1:0",
+	"claude-3-7-sonnet-latest":   "anthropic.claude-3-7-sonnet-20250219-v1:0",
+	"claude-3-7-sonnet-20250219": "anthropic.claude-3-7-sonnet-20250219-v1:0",
}

func awsModelID(requestModel string) (string, error) {
@@ -47,13 +49,14 @@ func awsModelID(requestModel string) (string, error) {
}

func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}
+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
	awsReq := &bedrockruntime.InvokeModelInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}
@@ -101,13 +104,14 @@ func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*
func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	createdTime := helper.GetTimestamp()
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}
+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

View File

@@ -1,6 +1,9 @@
package aws

-import "github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+import (
+	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/model"
+)

// Request is the request to AWS Claude
//
@@ -17,4 +20,5 @@ type Request struct {
	StopSequences []string         `json:"stop_sequences,omitempty"`
	Tools         []anthropic.Tool `json:"tools,omitempty"`
	ToolChoice    any              `json:"tool_choice,omitempty"`
+	Thinking      *model.Thinking  `json:"thinking,omitempty"`
}

View File

@@ -70,13 +70,14 @@ func ConvertRequest(textRequest relaymodel.GeneralOpenAIRequest) *Request {
}

func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}
+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
	awsReq := &bedrockruntime.InvokeModelInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}
@@ -140,13 +141,14 @@ func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse {
func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	createdTime := helper.GetTimestamp()
-	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
+	awsModelID, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}
+	awsModelID = utils.ConvertModelID2CrossRegionProfile(awsModelID, awsCli.Options().Region)
	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
-		ModelId:     aws.String(awsModelId),
+		ModelId:     aws.String(awsModelID),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

View File

@@ -0,0 +1,75 @@
package utils
import (
"context"
"slices"
"strings"
"github.com/songquanpeng/one-api/common/logger"
)
// CrossRegionInferences is a list of model IDs that support cross-region inference.
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html
//
// document.querySelectorAll('pre.programlisting code').forEach((e) => {console.log(e.innerHTML)})
var CrossRegionInferences = []string{
"us.amazon.nova-lite-v1:0",
"us.amazon.nova-micro-v1:0",
"us.amazon.nova-pro-v1:0",
"us.anthropic.claude-3-5-haiku-20241022-v1:0",
"us.anthropic.claude-3-5-sonnet-20240620-v1:0",
"us.anthropic.claude-3-5-sonnet-20241022-v2:0",
"us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"us.anthropic.claude-3-haiku-20240307-v1:0",
"us.anthropic.claude-3-opus-20240229-v1:0",
"us.anthropic.claude-3-sonnet-20240229-v1:0",
"us.meta.llama3-1-405b-instruct-v1:0",
"us.meta.llama3-1-70b-instruct-v1:0",
"us.meta.llama3-1-8b-instruct-v1:0",
"us.meta.llama3-2-11b-instruct-v1:0",
"us.meta.llama3-2-1b-instruct-v1:0",
"us.meta.llama3-2-3b-instruct-v1:0",
"us.meta.llama3-2-90b-instruct-v1:0",
"us.meta.llama3-3-70b-instruct-v1:0",
"us-gov.anthropic.claude-3-5-sonnet-20240620-v1:0",
"us-gov.anthropic.claude-3-haiku-20240307-v1:0",
"eu.amazon.nova-lite-v1:0",
"eu.amazon.nova-micro-v1:0",
"eu.amazon.nova-pro-v1:0",
"eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
"eu.anthropic.claude-3-haiku-20240307-v1:0",
"eu.anthropic.claude-3-sonnet-20240229-v1:0",
"eu.meta.llama3-2-1b-instruct-v1:0",
"eu.meta.llama3-2-3b-instruct-v1:0",
"apac.amazon.nova-lite-v1:0",
"apac.amazon.nova-micro-v1:0",
"apac.amazon.nova-pro-v1:0",
"apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
"apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
"apac.anthropic.claude-3-haiku-20240307-v1:0",
"apac.anthropic.claude-3-sonnet-20240229-v1:0",
}
// ConvertModelID2CrossRegionProfile converts the model ID to a cross-region profile ID.
func ConvertModelID2CrossRegionProfile(model, region string) string {
var regionPrefix string
switch prefix := strings.Split(region, "-")[0]; prefix {
case "us", "eu":
regionPrefix = prefix
case "ap":
regionPrefix = "apac"
default:
// not supported, return original model
return model
}
newModelID := regionPrefix + "." + model
if slices.Contains(CrossRegionInferences, newModelID) {
logger.Debugf(context.TODO(), "convert model %s to cross-region profile %s", model, newModelID)
return newModelID
}
// not found, return original model
return model
}
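A quick illustration of what the helper does with the table above (inputs chosen for illustration):

    // region "us-east-1" maps to the "us" prefix; the profile exists in CrossRegionInferences, so it is used
    ConvertModelID2CrossRegionProfile("anthropic.claude-3-7-sonnet-20250219-v1:0", "us-east-1")
    // -> "us.anthropic.claude-3-7-sonnet-20250219-v1:0"

    // region "ap-northeast-1" maps to the "apac" prefix
    ConvertModelID2CrossRegionProfile("anthropic.claude-3-5-sonnet-20240620-v1:0", "ap-northeast-1")
    // -> "apac.anthropic.claude-3-5-sonnet-20240620-v1:0"

    // unsupported region prefixes and unlisted profiles fall back to the original model ID
    ConvertModelID2CrossRegionProfile("anthropic.claude-3-haiku-20240307-v1:0", "ca-central-1")
    // -> "anthropic.claude-3-haiku-20240307-v1:0"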

View File

@@ -1,18 +1,12 @@
package gemini

+import (
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
+)
+
// https://ai.google.dev/models/gemini
-var ModelList = []string{
-	"gemini-pro", "gemini-1.0-pro",
-	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
-	"gemini-1.5-flash", "gemini-1.5-flash-8b",
-	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
-	"text-embedding-004", "aqa",
-	"gemini-2.0-flash", "gemini-2.0-flash-exp",
-	"gemini-2.0-flash-lite-preview-02-05",
-	"gemini-2.0-flash-thinking-exp-01-21",
-	"gemini-2.0-pro-exp-02-05",
-}
+var ModelList = geminiv2.ModelList

// ModelsSupportSystemInstruction is the list of models that support system instruction.
//

View File

@@ -0,0 +1,15 @@
package geminiv2
// https://ai.google.dev/models/gemini
var ModelList = []string{
"gemini-pro", "gemini-1.0-pro",
// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
"gemini-1.5-flash", "gemini-1.5-flash-8b",
"gemini-1.5-pro", "gemini-1.5-pro-experimental",
"text-embedding-004", "aqa",
"gemini-2.0-flash", "gemini-2.0-flash-exp",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp-02-05",
}

View File

@@ -0,0 +1,14 @@
package geminiv2
import (
"fmt"
"strings"
"github.com/songquanpeng/one-api/relay/meta"
)
func GetRequestURL(meta *meta.Meta) (string, error) {
baseURL := strings.TrimSuffix(meta.BaseURL, "/")
requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1")
return fmt.Sprintf("%s%s", baseURL, requestPath), nil
}
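For illustration, the helper simply strips the OpenAI-style /v1 prefix from the relayed path and glues it onto the configured base URL (the base URL below is an assumed channel setting, not part of this diff):

    // meta.BaseURL        = "https://generativelanguage.googleapis.com/v1beta/openai"
    // meta.RequestURLPath = "/v1/chat/completions"
    // GetRequestURL(meta) -> "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"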

View File

@@ -9,12 +9,16 @@ import (
	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/adaptor"
	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
	"github.com/songquanpeng/one-api/relay/adaptor/novita"
+	"github.com/songquanpeng/one-api/relay/adaptor/openrouter"
	"github.com/songquanpeng/one-api/relay/channeltype"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
@@ -59,6 +63,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
		return baiduv2.GetRequestURL(meta)
	case channeltype.AliBailian:
		return alibailian.GetRequestURL(meta)
+	case channeltype.GeminiOpenAICompatible:
+		return geminiv2.GetRequestURL(meta)
	default:
		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
	}
@@ -82,7 +88,29 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
	if request == nil {
		return nil, errors.New("request is nil")
	}
-	if request.Stream {
+
+	meta := meta.GetByContext(c)
+	switch meta.ChannelType {
+	case channeltype.OpenRouter:
+		includeReasoning := true
+		request.IncludeReasoning = &includeReasoning
+		if request.Provider == nil || request.Provider.Sort == "" &&
+			config.OpenrouterProviderSort != "" {
+			if request.Provider == nil {
+				request.Provider = &openrouter.RequestProvider{}
+			}
+			request.Provider.Sort = config.OpenrouterProviderSort
+		}
+	default:
+	}
+
+	if request.Stream && !config.EnforceIncludeUsage {
+		logger.Warn(c.Request.Context(),
+			"please set ENFORCE_INCLUDE_USAGE=true to ensure accurate billing in stream mode")
+	}
+
+	if config.EnforceIncludeUsage && request.Stream {
		// always return usage in stream mode
		if request.StreamOptions == nil {
			request.StreamOptions = &model.StreamOptions{}

View File

@@ -7,6 +7,7 @@ import (
	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
	"github.com/songquanpeng/one-api/relay/adaptor/deepseek"
	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
	"github.com/songquanpeng/one-api/relay/adaptor/groq"
	"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
@@ -82,6 +83,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
		return "openrouter", openrouter.ModelList
	case channeltype.AliBailian:
		return "alibailian", alibailian.ModelList
+	case channeltype.GeminiOpenAICompatible:
+		return "geminiv2", geminiv2.ModelList
	default:
		return "openai", ModelList
	}

View File

@@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int)
}

func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
+	if channelType == channeltype.OpenAICompatible {
+		return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1"))
+	}
+
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
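The effect of the new OpenAICompatible branch, with a made-up base URL for illustration:

    // baseURL "https://llm.example.com/api/" and requestURL "/v1/chat/completions"
    // now yield "https://llm.example.com/api/chat/completions":
    // the trailing slash on the base and the leading "/v1" on the request path are both dropped.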

View File

@@ -8,12 +8,11 @@ import (
	"net/http"
	"strings"

-	"github.com/songquanpeng/one-api/common/render"
-
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/conv"
	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/render"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/relaymode"
)
@@ -26,6 +25,7 @@ const (
func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
	responseText := ""
+	reasoningText := ""
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(bufio.ScanLines)
	var usage *model.Usage
@@ -61,6 +61,13 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
			}
			render.StringData(c, data)
			for _, choice := range streamResponse.Choices {
+				if choice.Delta.Reasoning != nil {
+					reasoningText += *choice.Delta.Reasoning
+				}
+				if choice.Delta.ReasoningContent != nil {
+					reasoningText += *choice.Delta.ReasoningContent
+				}
+
				responseText += conv.AsString(choice.Delta.Content)
			}
			if streamResponse.Usage != nil {
@@ -93,7 +100,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "", nil
	}

-	return nil, responseText, usage
+	return nil, reasoningText + responseText, usage
}

func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
@@ -136,10 +143,17 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}

-	if textResponse.Usage.TotalTokens == 0 || (textResponse.Usage.PromptTokens == 0 && textResponse.Usage.CompletionTokens == 0) {
+	if textResponse.Usage.TotalTokens == 0 ||
+		(textResponse.Usage.PromptTokens == 0 && textResponse.Usage.CompletionTokens == 0) {
		completionTokens := 0
		for _, choice := range textResponse.Choices {
			completionTokens += CountTokenText(choice.Message.StringContent(), modelName)
+			if choice.Message.Reasoning != nil {
+				completionTokens += CountToken(*choice.Message.Reasoning)
+			}
+			if choice.ReasoningContent != nil {
+				completionTokens += CountToken(*choice.ReasoningContent)
+			}
		}
		textResponse.Usage = model.Usage{
			PromptTokens:     promptTokens,
@@ -147,5 +161,6 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
			TotalTokens:      promptTokens + completionTokens,
		}
	}
+
	return nil, &textResponse.Usage
}

View File

@@ -1,20 +1,235 @@
package openrouter

var ModelList = []string{
"01-ai/yi-large",
"aetherwiing/mn-starcannon-12b",
"ai21/jamba-1-5-large",
"ai21/jamba-1-5-mini",
"ai21/jamba-instruct",
"aion-labs/aion-1.0",
"aion-labs/aion-1.0-mini",
"aion-labs/aion-rp-llama-3.1-8b",
"allenai/llama-3.1-tulu-3-405b",
"alpindale/goliath-120b",
"alpindale/magnum-72b",
"amazon/nova-lite-v1",
"amazon/nova-micro-v1",
"amazon/nova-pro-v1",
"anthracite-org/magnum-v2-72b",
"anthracite-org/magnum-v4-72b",
"anthropic/claude-2",
"anthropic/claude-2.0",
"anthropic/claude-2.0:beta",
"anthropic/claude-2.1",
"anthropic/claude-2.1:beta",
"anthropic/claude-2:beta",
"anthropic/claude-3-haiku",
"anthropic/claude-3-haiku:beta",
"anthropic/claude-3-opus",
"anthropic/claude-3-opus:beta",
"anthropic/claude-3-sonnet",
"anthropic/claude-3-sonnet:beta",
"anthropic/claude-3.5-haiku",
"anthropic/claude-3.5-haiku-20241022",
"anthropic/claude-3.5-haiku-20241022:beta",
"anthropic/claude-3.5-haiku:beta",
"anthropic/claude-3.5-sonnet",
"anthropic/claude-3.5-sonnet-20240620",
"anthropic/claude-3.5-sonnet-20240620:beta",
"anthropic/claude-3.5-sonnet:beta",
"cognitivecomputations/dolphin-mixtral-8x22b",
"cognitivecomputations/dolphin-mixtral-8x7b",
"cohere/command",
"cohere/command-r",
"cohere/command-r-03-2024",
"cohere/command-r-08-2024",
"cohere/command-r-plus",
"cohere/command-r-plus-04-2024",
"cohere/command-r-plus-08-2024",
"cohere/command-r7b-12-2024",
"databricks/dbrx-instruct",
"deepseek/deepseek-chat",
"deepseek/deepseek-chat-v2.5",
"deepseek/deepseek-chat:free",
"deepseek/deepseek-r1", "deepseek/deepseek-r1",
"deepseek/deepseek-r1-distill-llama-70b",
"deepseek/deepseek-r1-distill-llama-70b:free",
"deepseek/deepseek-r1-distill-llama-8b",
"deepseek/deepseek-r1-distill-qwen-1.5b",
"deepseek/deepseek-r1-distill-qwen-14b",
"deepseek/deepseek-r1-distill-qwen-32b",
"deepseek/deepseek-r1:free",
"eva-unit-01/eva-llama-3.33-70b",
"eva-unit-01/eva-qwen-2.5-32b",
"eva-unit-01/eva-qwen-2.5-72b",
"google/gemini-2.0-flash-001",
"google/gemini-2.0-flash-exp:free",
"google/gemini-2.0-flash-lite-preview-02-05:free",
"google/gemini-2.0-flash-thinking-exp-1219:free",
"google/gemini-2.0-flash-thinking-exp:free",
"google/gemini-2.0-pro-exp-02-05:free",
"google/gemini-exp-1206:free",
"google/gemini-flash-1.5",
"google/gemini-flash-1.5-8b",
"google/gemini-flash-1.5-8b-exp",
"google/gemini-pro",
"google/gemini-pro-1.5",
"google/gemini-pro-vision",
"google/gemma-2-27b-it",
"google/gemma-2-9b-it",
"google/gemma-2-9b-it:free",
"google/gemma-7b-it",
"google/learnlm-1.5-pro-experimental:free",
"google/palm-2-chat-bison",
"google/palm-2-chat-bison-32k",
"google/palm-2-codechat-bison",
"google/palm-2-codechat-bison-32k",
"gryphe/mythomax-l2-13b",
"gryphe/mythomax-l2-13b:free",
"huggingfaceh4/zephyr-7b-beta:free",
"infermatic/mn-inferor-12b",
"inflection/inflection-3-pi",
"inflection/inflection-3-productivity",
"jondurbin/airoboros-l2-70b",
"liquid/lfm-3b",
"liquid/lfm-40b",
"liquid/lfm-7b",
"mancer/weaver",
"meta-llama/llama-2-13b-chat",
"meta-llama/llama-2-70b-chat",
"meta-llama/llama-3-70b-instruct",
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3-8b-instruct:free",
"meta-llama/llama-3.1-405b",
"meta-llama/llama-3.1-405b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"meta-llama/llama-3.2-11b-vision-instruct",
"meta-llama/llama-3.2-11b-vision-instruct:free",
"meta-llama/llama-3.2-1b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3.2-90b-vision-instruct",
"meta-llama/llama-3.3-70b-instruct",
"meta-llama/llama-3.3-70b-instruct:free",
"meta-llama/llama-guard-2-8b",
"microsoft/phi-3-medium-128k-instruct",
"microsoft/phi-3-medium-128k-instruct:free",
"microsoft/phi-3-mini-128k-instruct",
"microsoft/phi-3-mini-128k-instruct:free",
"microsoft/phi-3.5-mini-128k-instruct",
"microsoft/phi-4",
"microsoft/wizardlm-2-7b",
"microsoft/wizardlm-2-8x22b",
"minimax/minimax-01",
"mistralai/codestral-2501",
"mistralai/codestral-mamba",
"mistralai/ministral-3b",
"mistralai/ministral-8b",
"mistralai/mistral-7b-instruct",
"mistralai/mistral-7b-instruct-v0.1",
"mistralai/mistral-7b-instruct-v0.3",
"mistralai/mistral-7b-instruct:free",
"mistralai/mistral-large",
"mistralai/mistral-large-2407",
"mistralai/mistral-large-2411",
"mistralai/mistral-medium",
"mistralai/mistral-nemo",
"mistralai/mistral-nemo:free",
"mistralai/mistral-small",
"mistralai/mistral-small-24b-instruct-2501",
"mistralai/mistral-small-24b-instruct-2501:free",
"mistralai/mistral-tiny",
"mistralai/mixtral-8x22b-instruct",
"mistralai/mixtral-8x7b",
"mistralai/mixtral-8x7b-instruct",
"mistralai/pixtral-12b",
"mistralai/pixtral-large-2411",
"neversleep/llama-3-lumimaid-70b",
"neversleep/llama-3-lumimaid-8b",
"neversleep/llama-3-lumimaid-8b:extended",
"neversleep/llama-3.1-lumimaid-70b",
"neversleep/llama-3.1-lumimaid-8b",
"neversleep/noromaid-20b",
"nothingiisreal/mn-celeste-12b",
"nousresearch/hermes-2-pro-llama-3-8b",
"nousresearch/hermes-3-llama-3.1-405b",
"nousresearch/hermes-3-llama-3.1-70b",
"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
"nousresearch/nous-hermes-llama2-13b",
"nvidia/llama-3.1-nemotron-70b-instruct",
"nvidia/llama-3.1-nemotron-70b-instruct:free",
"openai/chatgpt-4o-latest",
"openai/gpt-3.5-turbo",
"openai/gpt-3.5-turbo-0125",
"openai/gpt-3.5-turbo-0613",
"openai/gpt-3.5-turbo-1106",
"openai/gpt-3.5-turbo-16k",
"openai/gpt-3.5-turbo-instruct",
"openai/gpt-4",
"openai/gpt-4-0314",
"openai/gpt-4-1106-preview",
"openai/gpt-4-32k",
"openai/gpt-4-32k-0314",
"openai/gpt-4-turbo",
"openai/gpt-4-turbo-preview",
"openai/gpt-4o",
"openai/gpt-4o-2024-05-13",
"openai/gpt-4o-2024-08-06",
"openai/gpt-4o-2024-11-20",
"openai/gpt-4o-mini",
"openai/gpt-4o-mini-2024-07-18",
"openai/gpt-4o:extended",
"openai/o1",
"openai/o1-mini",
"openai/o1-mini-2024-09-12",
"openai/o1-preview",
"openai/o1-preview-2024-09-12",
"openai/o3-mini",
"openai/o3-mini-high",
"openchat/openchat-7b",
"openchat/openchat-7b:free",
"openrouter/auto",
"perplexity/llama-3.1-sonar-huge-128k-online",
"perplexity/llama-3.1-sonar-large-128k-chat",
"perplexity/llama-3.1-sonar-large-128k-online",
"perplexity/llama-3.1-sonar-small-128k-chat",
"perplexity/llama-3.1-sonar-small-128k-online",
"perplexity/sonar",
"perplexity/sonar-reasoning",
"pygmalionai/mythalion-13b",
"qwen/qvq-72b-preview",
"qwen/qwen-2-72b-instruct",
"qwen/qwen-2-7b-instruct",
"qwen/qwen-2-7b-instruct:free",
"qwen/qwen-2-vl-72b-instruct",
"qwen/qwen-2-vl-7b-instruct",
"qwen/qwen-2.5-72b-instruct",
"qwen/qwen-2.5-7b-instruct",
"qwen/qwen-2.5-coder-32b-instruct",
"qwen/qwen-max",
"qwen/qwen-plus",
"qwen/qwen-turbo",
"qwen/qwen-vl-plus:free", "qwen/qwen-vl-plus:free",
"qwen/qwen2.5-vl-72b-instruct:free",
"qwen/qwq-32b-preview",
"raifle/sorcererlm-8x22b",
"sao10k/fimbulvetr-11b-v2",
"sao10k/l3-euryale-70b",
"sao10k/l3-lunaris-8b",
"sao10k/l3.1-70b-hanami-x1",
"sao10k/l3.1-euryale-70b",
"sao10k/l3.3-euryale-70b",
"sophosympatheia/midnight-rose-70b",
"sophosympatheia/rogue-rose-103b-v0.2:free",
"teknium/openhermes-2.5-mistral-7b",
"thedrummer/rocinante-12b",
"thedrummer/unslopnemo-12b",
"undi95/remm-slerp-l2-13b",
"undi95/toppy-m-7b",
"undi95/toppy-m-7b:free",
"x-ai/grok-2-1212",
"x-ai/grok-2-vision-1212",
"x-ai/grok-beta",
"x-ai/grok-vision-beta",
"xwin-lm/xwin-lm-70b",
}

View File

@@ -0,0 +1,22 @@
package openrouter
// RequestProvider customize how your requests are routed using the provider object
// in the request body for Chat Completions and Completions.
//
// https://openrouter.ai/docs/features/provider-routing
type RequestProvider struct {
// Order is list of provider names to try in order (e.g. ["Anthropic", "OpenAI"]). Default: empty
Order []string `json:"order,omitempty"`
// AllowFallbacks is whether to allow backup providers when the primary is unavailable. Default: true
AllowFallbacks bool `json:"allow_fallbacks,omitempty"`
// RequireParameters is only use providers that support all parameters in your request. Default: false
RequireParameters bool `json:"require_parameters,omitempty"`
// DataCollection is control whether to use providers that may store data ("allow" or "deny"). Default: "allow"
DataCollection string `json:"data_collection,omitempty" binding:"omitempty,oneof=allow deny"`
// Ignore is list of provider names to skip for this request. Default: empty
Ignore []string `json:"ignore,omitempty"`
// Quantizations is list of quantization levels to filter by (e.g. ["int4", "int8"]). Default: empty
Quantizations []string `json:"quantizations,omitempty"`
// Sort is sort providers by price or throughput (e.g. "price" or "throughput"). Default: empty
Sort string `json:"sort,omitempty" binding:"omitempty,oneof=price throughput latency"`
}
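Given the json tags above, a populated provider object serializes roughly as shown in the comment below (field values are examples; the semantics come from the OpenRouter provider-routing docs linked above). When OPENROUTER_PROVIDER_SORT is set and the client omits provider.sort, the relay fills Sort with the configured value (see the openai adaptor change earlier in this compare).

    p := openrouter.RequestProvider{
        Order:          []string{"Anthropic", "OpenAI"},
        AllowFallbacks: true,
        DataCollection: "deny",
        Sort:           "throughput",
    }
    // marshals to: {"order":["Anthropic","OpenAI"],"allow_fallbacks":true,"data_collection":"deny","sort":"throughput"}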

View File

@@ -19,6 +19,7 @@ var ModelList = []string{
	"claude-3-5-sonnet@20240620",
	"claude-3-5-sonnet-v2@20241022",
	"claude-3-5-haiku@20241022",
+	"claude-3-7-sonnet@20250219",
}

const anthropicVersion = "vertex-2023-10-16"
@@ -31,7 +32,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
		return nil, errors.New("request is nil")
	}

-	claudeReq := anthropic.ConvertRequest(*request)
+	claudeReq, err := anthropic.ConvertRequest(c, *request)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert request")
+	}
+
	req := Request{
		AnthropicVersion: anthropicVersion,
		// Model:            claudeReq.Model,

View File

@@ -59,6 +59,8 @@ var ModelRatio = map[string]float64{
	"o1-preview-2024-09-12": 7.5,
	"o1-mini":               1.5, // $3.00 / 1M input tokens
	"o1-mini-2024-09-12":    1.5,
+	"o3-mini":               1.5, // $3.00 / 1M input tokens
+	"o3-mini-2025-01-31":    1.5,
	"davinci-002":           1,   // $0.002 / 1K tokens
	"babbage-002":           0.2, // $0.0004 / 1K tokens
	"text-ada-001":          0.2,
@@ -96,6 +98,8 @@ var ModelRatio = map[string]float64{
	"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD,
	"claude-3-5-sonnet-20241022": 3.0 / 1000 * USD,
	"claude-3-5-sonnet-latest":   3.0 / 1000 * USD,
+	"claude-3-7-sonnet-20250219": 3.0 / 1000 * USD,
+	"claude-3-7-sonnet-latest":   3.0 / 1000 * USD,
	"claude-3-opus-20240229":     15.0 / 1000 * USD,
	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
	"ERNIE-4.0-8K": 0.120 * RMB,
@@ -159,91 +163,105 @@ var ModelRatio = map[string]float64{
"embedding-2": 0.0005 * RMB, "embedding-2": 0.0005 * RMB,
"embedding-3": 0.0005 * RMB, "embedding-3": 0.0005 * RMB,
// https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing // https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
"qwen-turbo": 1.4286, // ¥0.02 / 1k tokens "qwen-turbo": 0.0003 * RMB,
"qwen-turbo-latest": 1.4286, "qwen-turbo-latest": 0.0003 * RMB,
"qwen-plus": 1.4286, "qwen-plus": 0.0008 * RMB,
"qwen-plus-latest": 1.4286, "qwen-plus-latest": 0.0008 * RMB,
"qwen-max": 1.4286, "qwen-max": 0.0024 * RMB,
"qwen-max-latest": 1.4286, "qwen-max-latest": 0.0024 * RMB,
"qwen-max-longcontext": 1.4286, "qwen-max-longcontext": 0.0005 * RMB,
"qwen-vl-max": 1.4286, "qwen-vl-max": 0.003 * RMB,
"qwen-vl-max-latest": 1.4286, "qwen-vl-max-latest": 0.003 * RMB,
"qwen-vl-plus": 1.4286, "qwen-vl-plus": 0.0015 * RMB,
"qwen-vl-plus-latest": 1.4286, "qwen-vl-plus-latest": 0.0015 * RMB,
"qwen-vl-ocr": 1.4286, "qwen-vl-ocr": 0.005 * RMB,
"qwen-vl-ocr-latest": 1.4286, "qwen-vl-ocr-latest": 0.005 * RMB,
"qwen-audio-turbo": 1.4286, "qwen-audio-turbo": 1.4286,
"qwen-math-plus": 1.4286, "qwen-math-plus": 0.004 * RMB,
"qwen-math-plus-latest": 1.4286, "qwen-math-plus-latest": 0.004 * RMB,
"qwen-math-turbo": 1.4286, "qwen-math-turbo": 0.002 * RMB,
"qwen-math-turbo-latest": 1.4286, "qwen-math-turbo-latest": 0.002 * RMB,
"qwen-coder-plus": 1.4286, "qwen-coder-plus": 0.0035 * RMB,
"qwen-coder-plus-latest": 1.4286, "qwen-coder-plus-latest": 0.0035 * RMB,
"qwen-coder-turbo": 1.4286, "qwen-coder-turbo": 0.002 * RMB,
"qwen-coder-turbo-latest": 1.4286, "qwen-coder-turbo-latest": 0.002 * RMB,
"qwq-32b-preview": 1.4286, "qwen-mt-plus": 0.015 * RMB,
"qwen2.5-72b-instruct": 1.4286, "qwen-mt-turbo": 0.001 * RMB,
"qwen2.5-32b-instruct": 1.4286, "qwq-32b-preview": 0.002 * RMB,
"qwen2.5-14b-instruct": 1.4286, "qwen2.5-72b-instruct": 0.004 * RMB,
"qwen2.5-7b-instruct": 1.4286, "qwen2.5-32b-instruct": 0.03 * RMB,
"qwen2.5-3b-instruct": 1.4286, "qwen2.5-14b-instruct": 0.001 * RMB,
"qwen2.5-1.5b-instruct": 1.4286, "qwen2.5-7b-instruct": 0.0005 * RMB,
"qwen2.5-0.5b-instruct": 1.4286, "qwen2.5-3b-instruct": 0.006 * RMB,
"qwen2-72b-instruct": 1.4286, "qwen2.5-1.5b-instruct": 0.0003 * RMB,
"qwen2-57b-a14b-instruct": 1.4286, "qwen2.5-0.5b-instruct": 0.0003 * RMB,
"qwen2-7b-instruct": 1.4286, "qwen2-72b-instruct": 0.004 * RMB,
"qwen2-1.5b-instruct": 1.4286, "qwen2-57b-a14b-instruct": 0.0035 * RMB,
"qwen2-0.5b-instruct": 1.4286, "qwen2-7b-instruct": 0.001 * RMB,
"qwen1.5-110b-chat": 1.4286, "qwen2-1.5b-instruct": 0.001 * RMB,
"qwen1.5-72b-chat": 1.4286, "qwen2-0.5b-instruct": 0.001 * RMB,
"qwen1.5-32b-chat": 1.4286, "qwen1.5-110b-chat": 0.007 * RMB,
"qwen1.5-14b-chat": 1.4286, "qwen1.5-72b-chat": 0.005 * RMB,
"qwen1.5-7b-chat": 1.4286, "qwen1.5-32b-chat": 0.0035 * RMB,
"qwen1.5-1.8b-chat": 1.4286, "qwen1.5-14b-chat": 0.002 * RMB,
"qwen1.5-0.5b-chat": 1.4286, "qwen1.5-7b-chat": 0.001 * RMB,
"qwen-72b-chat": 1.4286, "qwen1.5-1.8b-chat": 0.001 * RMB,
"qwen-14b-chat": 1.4286, "qwen1.5-0.5b-chat": 0.001 * RMB,
"qwen-7b-chat": 1.4286, "qwen-72b-chat": 0.02 * RMB,
"qwen-1.8b-chat": 1.4286, "qwen-14b-chat": 0.008 * RMB,
"qwen-1.8b-longcontext-chat": 1.4286, "qwen-7b-chat": 0.006 * RMB,
"qwen2-vl-7b-instruct": 1.4286, "qwen-1.8b-chat": 0.006 * RMB,
"qwen2-vl-2b-instruct": 1.4286, "qwen-1.8b-longcontext-chat": 0.006 * RMB,
"qwen-vl-v1": 1.4286, "qvq-72b-preview": 0.012 * RMB,
"qwen-vl-chat-v1": 1.4286, "qwen2.5-vl-72b-instruct": 0.016 * RMB,
"qwen2-audio-instruct": 1.4286, "qwen2.5-vl-7b-instruct": 0.002 * RMB,
"qwen-audio-chat": 1.4286, "qwen2.5-vl-3b-instruct": 0.0012 * RMB,
"qwen2.5-math-72b-instruct": 1.4286, "qwen2-vl-7b-instruct": 0.016 * RMB,
"qwen2.5-math-7b-instruct": 1.4286, "qwen2-vl-2b-instruct": 0.002 * RMB,
"qwen2.5-math-1.5b-instruct": 1.4286, "qwen-vl-v1": 0.002 * RMB,
"qwen2-math-72b-instruct": 1.4286, "qwen-vl-chat-v1": 0.002 * RMB,
"qwen2-math-7b-instruct": 1.4286, "qwen2-audio-instruct": 0.002 * RMB,
"qwen2-math-1.5b-instruct": 1.4286, "qwen-audio-chat": 0.002 * RMB,
"qwen2.5-coder-32b-instruct": 1.4286, "qwen2.5-math-72b-instruct": 0.004 * RMB,
"qwen2.5-coder-14b-instruct": 1.4286, "qwen2.5-math-7b-instruct": 0.001 * RMB,
"qwen2.5-coder-7b-instruct": 1.4286, "qwen2.5-math-1.5b-instruct": 0.001 * RMB,
"qwen2.5-coder-3b-instruct": 1.4286, "qwen2-math-72b-instruct": 0.004 * RMB,
"qwen2.5-coder-1.5b-instruct": 1.4286, "qwen2-math-7b-instruct": 0.001 * RMB,
"qwen2.5-coder-0.5b-instruct": 1.4286, "qwen2-math-1.5b-instruct": 0.001 * RMB,
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens "qwen2.5-coder-32b-instruct": 0.002 * RMB,
"text-embedding-v3": 0.05, "qwen2.5-coder-14b-instruct": 0.002 * RMB,
"text-embedding-v2": 0.05, "qwen2.5-coder-7b-instruct": 0.001 * RMB,
"text-embedding-async-v2": 0.05, "qwen2.5-coder-3b-instruct": 0.001 * RMB,
"text-embedding-async-v1": 0.05, "qwen2.5-coder-1.5b-instruct": 0.001 * RMB,
"ali-stable-diffusion-xl": 8.00, "qwen2.5-coder-0.5b-instruct": 0.001 * RMB,
"ali-stable-diffusion-v1.5": 8.00, "text-embedding-v1": 0.0007 * RMB, // ¥0.0007 / 1k tokens
"wanx-v1": 8.00, "text-embedding-v3": 0.0007 * RMB,
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens "text-embedding-v2": 0.0007 * RMB,
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens "text-embedding-async-v2": 0.0007 * RMB,
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens "text-embedding-async-v1": 0.0007 * RMB,
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens "ali-stable-diffusion-xl": 8.00,
"SparkDesk-v3.1-128K": 1.2858, // ¥0.018 / 1k tokens "ali-stable-diffusion-v1.5": 8.00,
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens "wanx-v1": 8.00,
"SparkDesk-v3.5-32K": 1.2858, // ¥0.018 / 1k tokens "deepseek-r1": 0.002 * RMB,
"SparkDesk-v4.0": 1.2858, // ¥0.018 / 1k tokens "deepseek-v3": 0.001 * RMB,
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens "deepseek-r1-distill-qwen-1.5b": 0.001 * RMB,
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens "deepseek-r1-distill-qwen-7b": 0.0005 * RMB,
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens "deepseek-r1-distill-qwen-14b": 0.001 * RMB,
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens "deepseek-r1-distill-qwen-32b": 0.002 * RMB,
"deepseek-r1-distill-llama-8b": 0.0005 * RMB,
"deepseek-r1-distill-llama-70b": 0.004 * RMB,
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1-128K": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5-32K": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v4.0": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
// https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0 // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"hunyuan-turbo": 0.015 * RMB, "hunyuan-turbo": 0.015 * RMB,
"hunyuan-large": 0.004 * RMB, "hunyuan-large": 0.004 * RMB,
@@ -371,6 +389,238 @@ var ModelRatio = map[string]float64{
"mistralai/mistral-7b-instruct-v0.2": 0.050 * USD, "mistralai/mistral-7b-instruct-v0.2": 0.050 * USD,
"mistralai/mistral-7b-v0.1": 0.050 * USD, "mistralai/mistral-7b-v0.1": 0.050 * USD,
"mistralai/mixtral-8x7b-instruct-v0.1": 0.300 * USD, "mistralai/mixtral-8x7b-instruct-v0.1": 0.300 * USD,
//https://openrouter.ai/models
"01-ai/yi-large": 1.5,
"aetherwiing/mn-starcannon-12b": 0.6,
"ai21/jamba-1-5-large": 4.0,
"ai21/jamba-1-5-mini": 0.2,
"ai21/jamba-instruct": 0.35,
"aion-labs/aion-1.0": 6.0,
"aion-labs/aion-1.0-mini": 1.2,
"aion-labs/aion-rp-llama-3.1-8b": 0.1,
"allenai/llama-3.1-tulu-3-405b": 5.0,
"alpindale/goliath-120b": 4.6875,
"alpindale/magnum-72b": 1.125,
"amazon/nova-lite-v1": 0.12,
"amazon/nova-micro-v1": 0.07,
"amazon/nova-pro-v1": 1.6,
"anthracite-org/magnum-v2-72b": 1.5,
"anthracite-org/magnum-v4-72b": 1.125,
"anthropic/claude-2": 12.0,
"anthropic/claude-2.0": 12.0,
"anthropic/claude-2.0:beta": 12.0,
"anthropic/claude-2.1": 12.0,
"anthropic/claude-2.1:beta": 12.0,
"anthropic/claude-2:beta": 12.0,
"anthropic/claude-3-haiku": 0.625,
"anthropic/claude-3-haiku:beta": 0.625,
"anthropic/claude-3-opus": 37.5,
"anthropic/claude-3-opus:beta": 37.5,
"anthropic/claude-3-sonnet": 7.5,
"anthropic/claude-3-sonnet:beta": 7.5,
"anthropic/claude-3.5-haiku": 2.0,
"anthropic/claude-3.5-haiku-20241022": 2.0,
"anthropic/claude-3.5-haiku-20241022:beta": 2.0,
"anthropic/claude-3.5-haiku:beta": 2.0,
"anthropic/claude-3.5-sonnet": 7.5,
"anthropic/claude-3.5-sonnet-20240620": 7.5,
"anthropic/claude-3.5-sonnet-20240620:beta": 7.5,
"anthropic/claude-3.5-sonnet:beta": 7.5,
"cognitivecomputations/dolphin-mixtral-8x22b": 0.45,
"cognitivecomputations/dolphin-mixtral-8x7b": 0.25,
"cohere/command": 0.95,
"cohere/command-r": 0.7125,
"cohere/command-r-03-2024": 0.7125,
"cohere/command-r-08-2024": 0.285,
"cohere/command-r-plus": 7.125,
"cohere/command-r-plus-04-2024": 7.125,
"cohere/command-r-plus-08-2024": 4.75,
"cohere/command-r7b-12-2024": 0.075,
"databricks/dbrx-instruct": 0.6,
"deepseek/deepseek-chat": 0.445,
"deepseek/deepseek-chat-v2.5": 1.0,
"deepseek/deepseek-chat:free": 0.0,
"deepseek/deepseek-r1": 1.2,
"deepseek/deepseek-r1-distill-llama-70b": 0.345,
"deepseek/deepseek-r1-distill-llama-70b:free": 0.0,
"deepseek/deepseek-r1-distill-llama-8b": 0.02,
"deepseek/deepseek-r1-distill-qwen-1.5b": 0.09,
"deepseek/deepseek-r1-distill-qwen-14b": 0.075,
"deepseek/deepseek-r1-distill-qwen-32b": 0.09,
"deepseek/deepseek-r1:free": 0.0,
"eva-unit-01/eva-llama-3.33-70b": 3.0,
"eva-unit-01/eva-qwen-2.5-32b": 1.7,
"eva-unit-01/eva-qwen-2.5-72b": 3.0,
"google/gemini-2.0-flash-001": 0.2,
"google/gemini-2.0-flash-exp:free": 0.0,
"google/gemini-2.0-flash-lite-preview-02-05:free": 0.0,
"google/gemini-2.0-flash-thinking-exp-1219:free": 0.0,
"google/gemini-2.0-flash-thinking-exp:free": 0.0,
"google/gemini-2.0-pro-exp-02-05:free": 0.0,
"google/gemini-exp-1206:free": 0.0,
"google/gemini-flash-1.5": 0.15,
"google/gemini-flash-1.5-8b": 0.075,
"google/gemini-flash-1.5-8b-exp": 0.0,
"google/gemini-pro": 0.75,
"google/gemini-pro-1.5": 2.5,
"google/gemini-pro-vision": 0.75,
"google/gemma-2-27b-it": 0.135,
"google/gemma-2-9b-it": 0.03,
"google/gemma-2-9b-it:free": 0.0,
"google/gemma-7b-it": 0.075,
"google/learnlm-1.5-pro-experimental:free": 0.0,
"google/palm-2-chat-bison": 1.0,
"google/palm-2-chat-bison-32k": 1.0,
"google/palm-2-codechat-bison": 1.0,
"google/palm-2-codechat-bison-32k": 1.0,
"gryphe/mythomax-l2-13b": 0.0325,
"gryphe/mythomax-l2-13b:free": 0.0,
"huggingfaceh4/zephyr-7b-beta:free": 0.0,
"infermatic/mn-inferor-12b": 0.6,
"inflection/inflection-3-pi": 5.0,
"inflection/inflection-3-productivity": 5.0,
"jondurbin/airoboros-l2-70b": 0.25,
"liquid/lfm-3b": 0.01,
"liquid/lfm-40b": 0.075,
"liquid/lfm-7b": 0.005,
"mancer/weaver": 1.125,
"meta-llama/llama-2-13b-chat": 0.11,
"meta-llama/llama-2-70b-chat": 0.45,
"meta-llama/llama-3-70b-instruct": 0.2,
"meta-llama/llama-3-8b-instruct": 0.03,
"meta-llama/llama-3-8b-instruct:free": 0.0,
"meta-llama/llama-3.1-405b": 1.0,
"meta-llama/llama-3.1-405b-instruct": 0.4,
"meta-llama/llama-3.1-70b-instruct": 0.15,
"meta-llama/llama-3.1-8b-instruct": 0.025,
"meta-llama/llama-3.2-11b-vision-instruct": 0.0275,
"meta-llama/llama-3.2-11b-vision-instruct:free": 0.0,
"meta-llama/llama-3.2-1b-instruct": 0.005,
"meta-llama/llama-3.2-3b-instruct": 0.0125,
"meta-llama/llama-3.2-90b-vision-instruct": 0.8,
"meta-llama/llama-3.3-70b-instruct": 0.15,
"meta-llama/llama-3.3-70b-instruct:free": 0.0,
"meta-llama/llama-guard-2-8b": 0.1,
"microsoft/phi-3-medium-128k-instruct": 0.5,
"microsoft/phi-3-medium-128k-instruct:free": 0.0,
"microsoft/phi-3-mini-128k-instruct": 0.05,
"microsoft/phi-3-mini-128k-instruct:free": 0.0,
"microsoft/phi-3.5-mini-128k-instruct": 0.05,
"microsoft/phi-4": 0.07,
"microsoft/wizardlm-2-7b": 0.035,
"microsoft/wizardlm-2-8x22b": 0.25,
"minimax/minimax-01": 0.55,
"mistralai/codestral-2501": 0.45,
"mistralai/codestral-mamba": 0.125,
"mistralai/ministral-3b": 0.02,
"mistralai/ministral-8b": 0.05,
"mistralai/mistral-7b-instruct": 0.0275,
"mistralai/mistral-7b-instruct-v0.1": 0.1,
"mistralai/mistral-7b-instruct-v0.3": 0.0275,
"mistralai/mistral-7b-instruct:free": 0.0,
"mistralai/mistral-large": 3.0,
"mistralai/mistral-large-2407": 3.0,
"mistralai/mistral-large-2411": 3.0,
"mistralai/mistral-medium": 4.05,
"mistralai/mistral-nemo": 0.04,
"mistralai/mistral-nemo:free": 0.0,
"mistralai/mistral-small": 0.3,
"mistralai/mistral-small-24b-instruct-2501": 0.07,
"mistralai/mistral-small-24b-instruct-2501:free": 0.0,
"mistralai/mistral-tiny": 0.125,
"mistralai/mixtral-8x22b-instruct": 0.45,
"mistralai/mixtral-8x7b": 0.3,
"mistralai/mixtral-8x7b-instruct": 0.12,
"mistralai/pixtral-12b": 0.05,
"mistralai/pixtral-large-2411": 3.0,
"neversleep/llama-3-lumimaid-70b": 2.25,
"neversleep/llama-3-lumimaid-8b": 0.5625,
"neversleep/llama-3-lumimaid-8b:extended": 0.5625,
"neversleep/llama-3.1-lumimaid-70b": 2.25,
"neversleep/llama-3.1-lumimaid-8b": 0.5625,
"neversleep/noromaid-20b": 1.125,
"nothingiisreal/mn-celeste-12b": 0.6,
"nousresearch/hermes-2-pro-llama-3-8b": 0.02,
"nousresearch/hermes-3-llama-3.1-405b": 0.4,
"nousresearch/hermes-3-llama-3.1-70b": 0.15,
"nousresearch/nous-hermes-2-mixtral-8x7b-dpo": 0.3,
"nousresearch/nous-hermes-llama2-13b": 0.085,
"nvidia/llama-3.1-nemotron-70b-instruct": 0.15,
"nvidia/llama-3.1-nemotron-70b-instruct:free": 0.0,
"openai/chatgpt-4o-latest": 7.5,
"openai/gpt-3.5-turbo": 0.75,
"openai/gpt-3.5-turbo-0125": 0.75,
"openai/gpt-3.5-turbo-0613": 1.0,
"openai/gpt-3.5-turbo-1106": 1.0,
"openai/gpt-3.5-turbo-16k": 2.0,
"openai/gpt-3.5-turbo-instruct": 1.0,
"openai/gpt-4": 30.0,
"openai/gpt-4-0314": 30.0,
"openai/gpt-4-1106-preview": 15.0,
"openai/gpt-4-32k": 60.0,
"openai/gpt-4-32k-0314": 60.0,
"openai/gpt-4-turbo": 15.0,
"openai/gpt-4-turbo-preview": 15.0,
"openai/gpt-4o": 5.0,
"openai/gpt-4o-2024-05-13": 7.5,
"openai/gpt-4o-2024-08-06": 5.0,
"openai/gpt-4o-2024-11-20": 5.0,
"openai/gpt-4o-mini": 0.3,
"openai/gpt-4o-mini-2024-07-18": 0.3,
"openai/gpt-4o:extended": 9.0,
"openai/o1": 30.0,
"openai/o1-mini": 2.2,
"openai/o1-mini-2024-09-12": 2.2,
"openai/o1-preview": 30.0,
"openai/o1-preview-2024-09-12": 30.0,
"openai/o3-mini": 2.2,
"openai/o3-mini-high": 2.2,
"openchat/openchat-7b": 0.0275,
"openchat/openchat-7b:free": 0.0,
"openrouter/auto": -500000.0,
"perplexity/llama-3.1-sonar-huge-128k-online": 2.5,
"perplexity/llama-3.1-sonar-large-128k-chat": 0.5,
"perplexity/llama-3.1-sonar-large-128k-online": 0.5,
"perplexity/llama-3.1-sonar-small-128k-chat": 0.1,
"perplexity/llama-3.1-sonar-small-128k-online": 0.1,
"perplexity/sonar": 0.5,
"perplexity/sonar-reasoning": 2.5,
"pygmalionai/mythalion-13b": 0.6,
"qwen/qvq-72b-preview": 0.25,
"qwen/qwen-2-72b-instruct": 0.45,
"qwen/qwen-2-7b-instruct": 0.027,
"qwen/qwen-2-7b-instruct:free": 0.0,
"qwen/qwen-2-vl-72b-instruct": 0.2,
"qwen/qwen-2-vl-7b-instruct": 0.05,
"qwen/qwen-2.5-72b-instruct": 0.2,
"qwen/qwen-2.5-7b-instruct": 0.025,
"qwen/qwen-2.5-coder-32b-instruct": 0.08,
"qwen/qwen-max": 3.2,
"qwen/qwen-plus": 0.6,
"qwen/qwen-turbo": 0.1,
"qwen/qwen-vl-plus:free": 0.0,
"qwen/qwen2.5-vl-72b-instruct:free": 0.0,
"qwen/qwq-32b-preview": 0.09,
"raifle/sorcererlm-8x22b": 2.25,
"sao10k/fimbulvetr-11b-v2": 0.6,
"sao10k/l3-euryale-70b": 0.4,
"sao10k/l3-lunaris-8b": 0.03,
"sao10k/l3.1-70b-hanami-x1": 1.5,
"sao10k/l3.1-euryale-70b": 0.4,
"sao10k/l3.3-euryale-70b": 0.4,
"sophosympatheia/midnight-rose-70b": 0.4,
"sophosympatheia/rogue-rose-103b-v0.2:free": 0.0,
"teknium/openhermes-2.5-mistral-7b": 0.085,
"thedrummer/rocinante-12b": 0.25,
"thedrummer/unslopnemo-12b": 0.25,
"undi95/remm-slerp-l2-13b": 0.6,
"undi95/toppy-m-7b": 0.035,
"undi95/toppy-m-7b:free": 0.0,
"x-ai/grok-2-1212": 5.0,
"x-ai/grok-2-vision-1212": 5.0,
"x-ai/grok-beta": 7.5,
"x-ai/grok-vision-beta": 7.5,
"xwin-lm/xwin-lm-70b": 1.875,
}
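A note on reading the ratios in this map: judging from the inline comments in the hunks above (for example `1.2858 // ¥0.018 / 1k tokens` and `0.0007 * RMB // ¥0.0007 / 1k tokens`), a ratio written as `X * RMB` corresponds to roughly ¥X per 1K prompt tokens, which puts the `RMB` constant near 71.43 and a bare ratio of 1 near $0.002 per 1K tokens. A minimal sketch of that conversion follows; the constant is inferred from the comments, not copied from the project's source.

```go
package main

import "fmt"

// RMB is inferred from the comments above (0.05 == ¥0.0007/1K and
// 1.2858 == ¥0.018/1K both give ~71.43); treat it as an illustration,
// not the project's authoritative constant.
const RMB = 0.05 / 0.0007 // ≈ 71.43

// pricePer1kTokensCNY converts a model ratio back into ¥ per 1K prompt tokens.
func pricePer1kTokensCNY(ratio float64) float64 {
	return ratio / RMB
}

func main() {
	fmt.Printf("deepseek-r1: ¥%.4f / 1K tokens\n", pricePer1kTokensCNY(0.002*RMB)) // ¥0.0020
	fmt.Printf("SparkDesk:   ¥%.4f / 1K tokens\n", pricePer1kTokensCNY(1.2858))    // ≈ ¥0.0180
}
```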
var CompletionRatio = map[string]float64{


@@ -51,5 +51,7 @@ const (
BaiduV2
XunfeiV2
AliBailian
OpenAICompatible
GeminiOpenAICompatible
Dummy
)


@@ -51,6 +51,9 @@ var ChannelBaseURLs = []string{
"https://qianfan.baidubce.com", // 47 "https://qianfan.baidubce.com", // 47
"https://spark-api-open.xf-yun.com", // 48 "https://spark-api-open.xf-yun.com", // 48
"https://dashscope.aliyuncs.com", // 49 "https://dashscope.aliyuncs.com", // 49
"", // 50
"https://generativelanguage.googleapis.com/v1beta/openai/", // 51
} }
func init() { func init() {


@@ -102,6 +102,9 @@ func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.M
var quota int64
completionRatio := billingratio.GetCompletionRatio(textRequest.Model, meta.ChannelType)
promptTokens := usage.PromptTokens
// It appears that DeepSeek's official service automatically merges ReasoningTokens into CompletionTokens,
// but the behavior of third-party providers may differ, so for now we do not add them manually.
// completionTokens := usage.CompletionTokens + usage.CompletionTokensDetails.ReasoningTokens
completionTokens := usage.CompletionTokens
quota = int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
if ratio != 0 && quota <= 0 {
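The hunk above keeps the existing quota formula, `quota = ceil((promptTokens + completionTokens * completionRatio) * ratio)`, and deliberately leaves reasoning tokens out of `completionTokens`. A standalone sketch of the arithmetic follows; the token counts and ratios are made-up illustration values, not values from this change.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical values; in the handler they come from the usage object
	// and the per-model ratio tables.
	promptTokens := 1000
	completionTokens := 500
	modelRatio := 0.25     // per-model price ratio
	completionRatio := 3.0 // completion tokens billed at 3x prompt tokens

	quota := int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * modelRatio))
	fmt.Println(quota) // 625
}
```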


@@ -1,5 +1,7 @@
package model
import "github.com/songquanpeng/one-api/relay/adaptor/openrouter"
type ResponseFormat struct {
Type string `json:"type,omitempty"`
JsonSchema *JSONSchema `json:"json_schema,omitempty"`
@@ -66,6 +68,21 @@ type GeneralOpenAIRequest struct {
// Others
Instruction string `json:"instruction,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
// -------------------------------------
// Openrouter
// -------------------------------------
Provider *openrouter.RequestProvider `json:"provider,omitempty"`
IncludeReasoning *bool `json:"include_reasoning,omitempty"`
// -------------------------------------
// Anthropic
// -------------------------------------
Thinking *Thinking `json:"thinking,omitempty"`
}
// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking
type Thinking struct {
Type string `json:"type"`
BudgetTokens int `json:"budget_tokens" binding:"omitempty,min=1024"`
}
func (r GeneralOpenAIRequest) ParseInput() []string {
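For context, a request that opts into the new Anthropic extended-thinking field might be built roughly as below. This is a sketch only: it assumes the usual `Model`/`Messages` fields on `GeneralOpenAIRequest`, and the `"enabled"` type and 1024-token minimum follow the Anthropic documentation linked above rather than anything shown in this diff.

```go
package model

// exampleThinkingRequest is an illustrative helper, not part of the change.
func exampleThinkingRequest() GeneralOpenAIRequest {
	return GeneralOpenAIRequest{
		Model: "claude-3-7-sonnet-20250219", // illustrative model name
		Messages: []Message{
			{Role: "user", Content: "Why is the sky blue?"},
		},
		Thinking: &Thinking{
			Type:         "enabled",
			BudgetTokens: 2048, // must be >= 1024 per the binding tag above
		},
	}
}
```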


@@ -1,12 +1,35 @@
package model
type Message struct {
Role string `json:"role,omitempty"`
-Content any `json:"content,omitempty"`
-ReasoningContent any `json:"reasoning_content,omitempty"`
+// Content is a string or a list of objects
+Content any `json:"content,omitempty"`
Name *string `json:"name,omitempty"`
ToolCalls []Tool `json:"tool_calls,omitempty"`
ToolCallId string `json:"tool_call_id,omitempty"`
+Audio *messageAudio `json:"audio,omitempty"`
+// -------------------------------------
+// DeepSeek-specific fields
+// https://api-docs.deepseek.com/api/create-chat-completion
+// -------------------------------------
+// Prefix forces the model to begin its answer with the supplied prefix in the assistant message.
+// To enable this feature, set base_url to "https://api.deepseek.com/beta".
+Prefix *bool `json:"prefix,omitempty"`
+// ReasoningContent is used for the deepseek-reasoner model in the Chat
+// Prefix Completion feature as the input for the CoT in the last assistant message.
+// When using this feature, the prefix parameter must be set to true.
+ReasoningContent *string `json:"reasoning_content,omitempty"`
+// -------------------------------------
+// OpenRouter
+// -------------------------------------
+Reasoning *string `json:"reasoning,omitempty"`
+Refusal *bool `json:"refusal,omitempty"`
}
+type messageAudio struct {
+Id string `json:"id"`
+Data string `json:"data,omitempty"`
+ExpiredAt int `json:"expired_at,omitempty"`
+Transcript string `json:"transcript,omitempty"`
+}
func (m Message) IsStringContent() bool {
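To illustrate the DeepSeek-specific fields, the last assistant message of a prefix-completion call might look roughly like this; the semantics follow the struct comments above, and the concrete strings are invented for the example.

```go
package model

// examplePrefixMessage is an illustrative helper, not part of the change.
func examplePrefixMessage() Message {
	prefix := true
	cot := "Step 1: restate the problem..."
	return Message{
		Role:             "assistant",
		Content:          "Final answer:",
		Prefix:           &prefix, // requires base_url "https://api.deepseek.com/beta"
		ReasoningContent: &cot,    // CoT input for deepseek-reasoner prefix completion
	}
}
```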


@@ -4,14 +4,12 @@ type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
-CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"`
-}
-type CompletionTokensDetails struct {
-ReasoningTokens int `json:"reasoning_tokens"`
-AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
-RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
+// PromptTokensDetails may be empty for some models
+PromptTokensDetails *usagePromptTokensDetails `gorm:"-" json:"prompt_tokens_details,omitempty"`
+// CompletionTokensDetails may be empty for some models
+CompletionTokensDetails *usageCompletionTokensDetails `gorm:"-" json:"completion_tokens_details,omitempty"`
+ServiceTier string `gorm:"-" json:"service_tier,omitempty"`
+SystemFingerprint string `gorm:"-" json:"system_fingerprint,omitempty"`
}
type Error struct {
@@ -25,3 +23,20 @@ type ErrorWithStatusCode struct {
Error
StatusCode int `json:"status_code"`
}
type usagePromptTokensDetails struct {
CachedTokens int `json:"cached_tokens"`
AudioTokens int `json:"audio_tokens"`
// TextTokens could be zero for pure text chats
TextTokens int `json:"text_tokens"`
ImageTokens int `json:"image_tokens"`
}
type usageCompletionTokensDetails struct {
ReasoningTokens int `json:"reasoning_tokens"`
AudioTokens int `json:"audio_tokens"`
AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
// TextTokens could be zero for pure text chats
TextTokens int `json:"text_tokens"`
}
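A quick sketch of how a caller could read the new detail fields after decoding a provider response into this `Usage` type; the JSON values are hypothetical.

```go
package model

import (
	"encoding/json"
	"fmt"
)

// exampleUsageDecode is an illustrative helper, not part of the change.
func exampleUsageDecode() {
	raw := []byte(`{
		"prompt_tokens": 100,
		"completion_tokens": 450,
		"total_tokens": 550,
		"completion_tokens_details": {"reasoning_tokens": 300}
	}`)
	var u Usage
	if err := json.Unmarshal(raw, &u); err != nil {
		panic(err)
	}
	if u.CompletionTokensDetails != nil {
		fmt.Println("reasoning tokens:", u.CompletionTokensDetails.ReasoningTokens)
	}
}
```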


@@ -1,17 +1,7 @@
-import React, { useEffect, useState } from 'react';
-import { useTranslation } from 'react-i18next';
-import {
-  Button,
-  Dropdown,
-  Form,
-  Input,
-  Label,
-  Message,
-  Pagination,
-  Popup,
-  Table,
-} from 'semantic-ui-react';
-import { Link } from 'react-router-dom';
+import React, {useEffect, useState} from 'react';
+import {useTranslation} from 'react-i18next';
+import {Button, Dropdown, Form, Input, Label, Message, Pagination, Popup, Table,} from 'semantic-ui-react';
+import {Link} from 'react-router-dom';
import {
API,
loadChannelModels,
@@ -23,8 +13,8 @@ import {
timestamp2string,
} from '../helpers';
-import { CHANNEL_OPTIONS, ITEMS_PER_PAGE } from '../constants';
-import { renderGroup, renderNumber } from '../helpers/render';
+import {CHANNEL_OPTIONS, ITEMS_PER_PAGE} from '../constants';
+import {renderGroup, renderNumber} from '../helpers/render';
function renderTimestamp(timestamp) {
return <>{timestamp2string(timestamp)}</>;
@@ -54,6 +44,9 @@ function renderType(type, t) {
function renderBalance(type, balance, t) {
switch (type) {
case 1: // OpenAI
if (balance === 0) {
return <span>{t('channel.table.balance_not_supported')}</span>;
}
return <span>${balance.toFixed(2)}</span>;
case 4: // CloseAI
return <span>¥{balance.toFixed(2)}</span>;


@@ -1,12 +1,26 @@
export const CHANNEL_OPTIONS = [
-{key: 1, text: 'OpenAI', value: 1, color: 'green'},
-{key: 14, text: 'Anthropic Claude', value: 14, color: 'black'},
-{key: 33, text: 'AWS', value: 33, color: 'black'},
-{key: 3, text: 'Azure OpenAI', value: 3, color: 'olive'},
-{key: 11, text: 'Google PaLM2', value: 11, color: 'orange'},
-{key: 24, text: 'Google Gemini', value: 24, color: 'orange'},
-{key: 28, text: 'Mistral AI', value: 28, color: 'orange'},
-{key: 41, text: 'Novita', value: 41, color: 'purple'},
+{ key: 1, text: 'OpenAI', value: 1, color: 'green' },
+{
+  key: 50,
+  text: 'OpenAI 兼容',
+  value: 50,
+  color: 'olive',
+  description: 'OpenAI 兼容渠道,支持设置 Base URL',
+},
+{key: 14, text: 'Anthropic', value: 14, color: 'black'},
+{ key: 33, text: 'AWS', value: 33, color: 'black' },
+{key: 3, text: 'Azure', value: 3, color: 'olive'},
+{key: 11, text: 'PaLM2', value: 11, color: 'orange'},
+{key: 24, text: 'Gemini', value: 24, color: 'orange'},
+{
+  key: 51,
+  text: 'Gemini (OpenAI)',
+  value: 51,
+  color: 'orange',
+  description: 'Gemini OpenAI 兼容格式',
+},
+{ key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
+{ key: 41, text: 'Novita', value: 41, color: 'purple' },
{
key: 40,
text: '字节火山引擎',
@@ -28,14 +42,14 @@ export const CHANNEL_OPTIONS = [
color: 'blue',
tip: '请前往<a href="https://console.bce.baidu.com/iam/#/iam/apikey/list" target="_blank">此处</a>获取 API Key注意本渠道仅支持<a target="_blank" href="https://cloud.baidu.com/doc/WENXINWORKSHOP/s/em4tsqo3v">推理服务 V2</a>相关模型',
},
{
key: 17,
text: '阿里通义千问',
value: 17,
color: 'orange',
tip: '如需使用阿里云百炼,请使用<strong>阿里云百炼</strong>渠道',
},
-{key: 49, text: '阿里云百炼', value: 49, color: 'orange'},
+{ key: 49, text: '阿里云百炼', value: 49, color: 'orange' },
{
key: 18,
text: '讯飞星火认知',
@@ -50,38 +64,45 @@ export const CHANNEL_OPTIONS = [
color: 'blue',
tip: 'HTTP 版本的讯飞接口,前往<a href="https://console.xfyun.cn/services/cbm" target="_blank">此处</a>获取 HTTP 服务接口认证密钥',
},
-{key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet'},
-{key: 19, text: '360 智脑', value: 19, color: 'blue'},
-{key: 25, text: 'Moonshot AI', value: 25, color: 'black'},
-{key: 23, text: '腾讯混元', value: 23, color: 'teal'},
-{key: 26, text: '百川大模型', value: 26, color: 'orange'},
-{key: 27, text: 'MiniMax', value: 27, color: 'red'},
-{key: 29, text: 'Groq', value: 29, color: 'orange'},
-{key: 30, text: 'Ollama', value: 30, color: 'black'},
-{key: 31, text: '零一万物', value: 31, color: 'green'},
-{key: 32, text: '阶跃星辰', value: 32, color: 'blue'},
-{key: 34, text: 'Coze', value: 34, color: 'blue'},
-{key: 35, text: 'Cohere', value: 35, color: 'blue'},
-{key: 36, text: 'DeepSeek', value: 36, color: 'black'},
-{key: 37, text: 'Cloudflare', value: 37, color: 'orange'},
-{key: 38, text: 'DeepL', value: 38, color: 'black'},
-{key: 39, text: 'together.ai', value: 39, color: 'blue'},
-{key: 42, text: 'VertexAI', value: 42, color: 'blue'},
-{key: 43, text: 'Proxy', value: 43, color: 'blue'},
-{key: 44, text: 'SiliconFlow', value: 44, color: 'blue'},
-{key: 45, text: 'xAI', value: 45, color: 'blue'},
-{key: 46, text: 'Replicate', value: 46, color: 'blue'},
-{key: 8, text: '自定义渠道', value: 8, color: 'pink'},
-{key: 22, text: '知识库FastGPT', value: 22, color: 'blue'},
-{key: 21, text: '知识库AI Proxy', value: 21, color: 'purple'},
-{key: 20, text: 'OpenRouter', value: 20, color: 'black'},
-{key: 2, text: '代理API2D', value: 2, color: 'blue'},
-{key: 5, text: '代理OpenAI-SB', value: 5, color: 'brown'},
-{key: 7, text: '代理OhMyGPT', value: 7, color: 'purple'},
-{key: 10, text: '代理AI Proxy', value: 10, color: 'purple'},
-{key: 4, text: '代理CloseAI', value: 4, color: 'teal'},
-{key: 6, text: '代理OpenAI Max', value: 6, color: 'violet'},
-{key: 9, text: '代理AI.LS', value: 9, color: 'yellow'},
-{key: 12, text: '代理API2GPT', value: 12, color: 'blue'},
-{key: 13, text: '代理:AIGC2D', value: 13, color: 'purple'},
+{ key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' },
+{ key: 19, text: '360 智脑', value: 19, color: 'blue' },
+{ key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
+{ key: 23, text: '腾讯混元', value: 23, color: 'teal' },
+{ key: 26, text: '百川大模型', value: 26, color: 'orange' },
+{ key: 27, text: 'MiniMax', value: 27, color: 'red' },
+{ key: 29, text: 'Groq', value: 29, color: 'orange' },
+{ key: 30, text: 'Ollama', value: 30, color: 'black' },
+{ key: 31, text: '零一万物', value: 31, color: 'green' },
+{ key: 32, text: '阶跃星辰', value: 32, color: 'blue' },
+{ key: 34, text: 'Coze', value: 34, color: 'blue' },
+{ key: 35, text: 'Cohere', value: 35, color: 'blue' },
+{ key: 36, text: 'DeepSeek', value: 36, color: 'black' },
+{ key: 37, text: 'Cloudflare', value: 37, color: 'orange' },
+{ key: 38, text: 'DeepL', value: 38, color: 'black' },
+{ key: 39, text: 'together.ai', value: 39, color: 'blue' },
+{ key: 42, text: 'VertexAI', value: 42, color: 'blue' },
+{ key: 43, text: 'Proxy', value: 43, color: 'blue' },
+{ key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
+{ key: 45, text: 'xAI', value: 45, color: 'blue' },
+{ key: 46, text: 'Replicate', value: 46, color: 'blue' },
+{
+  key: 8,
+  text: '自定义渠道',
+  value: 8,
+  color: 'pink',
+  tip: '不推荐使用,请使用 <strong>OpenAI 兼容</strong>渠道类型。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL请使用 OpenAI 兼容渠道类型',
+  description: '不推荐使用,请使用 OpenAI 兼容渠道类型',
+},
+{ key: 22, text: '知识库FastGPT', value: 22, color: 'blue' },
+{ key: 21, text: '知识库AI Proxy', value: 21, color: 'purple' },
+{ key: 20, text: 'OpenRouter', value: 20, color: 'black' },
+{ key: 2, text: '代理API2D', value: 2, color: 'blue' },
+{ key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' },
+{ key: 7, text: '代理OhMyGPT', value: 7, color: 'purple' },
+{ key: 10, text: '代理AI Proxy', value: 10, color: 'purple' },
+{ key: 4, text: '代理CloseAI', value: 4, color: 'teal' },
+{ key: 6, text: '代理OpenAI Max', value: 6, color: 'violet' },
+{ key: 9, text: '代理AI.LS', value: 9, color: 'yellow' },
+{ key: 12, text: '代理API2GPT', value: 12, color: 'blue' },
+{ key: 13, text: '代理AIGC2D', value: 13, color: 'purple' },
];


@@ -104,8 +104,10 @@
"model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names", "model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names",
"system_prompt": "System Prompt", "system_prompt": "System Prompt",
"system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model", "system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model",
"base_url": "Proxy", "proxy_url": "Proxy",
"base_url_placeholder": "Optional, used for API calls through proxy. Enter proxy address in format: https://domain.com", "proxy_url_placeholder": "This is optional and used for API calls via a proxy. Please enter the proxy URL, formatted as: https://domain.com",
"base_url": "Base URL",
"base_url_placeholder": "The Base URL required by the OpenAPI SDK",
"key": "Key", "key": "Key",
"key_placeholder": "Please enter key", "key_placeholder": "Please enter key",
"batch": "Batch Create", "batch": "Batch Create",


@@ -104,8 +104,10 @@
"model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称", "model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称",
"system_prompt": "系统提示词", "system_prompt": "系统提示词",
"system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型", "system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型",
"base_url": "代理", "proxy_url": "代理",
"base_url_placeholder": "此项可选,用于通过代理站来进行 API 调用请输入代理站地址格式为https://domain.com", "proxy_url_placeholder": "此项可选,用于通过代理站来进行 API 调用请输入代理站地址格式为https://domain.com。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL请使用 OpenAI 兼容渠道类型",
"base_url": "Base URL",
"base_url_placeholder": "OpenAPI SDK 中所要求的 Base URL",
"key": "密钥", "key": "密钥",
"key_placeholder": "请输入密钥", "key_placeholder": "请输入密钥",
"batch": "批量创建", "batch": "批量创建",


@@ -1,6 +1,6 @@
import React, {useEffect, useState} from 'react';
import {useTranslation} from 'react-i18next';
-import {Button, Card, Form, Input, Message,} from 'semantic-ui-react';
+import {Button, Card, Form, Input, Message} from 'semantic-ui-react';
import {useNavigate, useParams} from 'react-router-dom';
import {API, copy, getChannelModels, showError, showInfo, showSuccess, verifyJSON,} from '../../helpers';
import {CHANNEL_OPTIONS} from '../../constants';
@@ -339,6 +339,20 @@ const EditChannel = () => {
{inputs.type === 8 && (
<Form.Field>
<Form.Input
required
label={t('channel.edit.proxy_url')}
name='base_url'
placeholder={t('channel.edit.proxy_url_placeholder')}
onChange={handleInputChange}
value={inputs.base_url}
autoComplete='new-password'
/>
</Form.Field>
)}
{inputs.type === 50 && (
<Form.Field>
<Form.Input
required
label={t('channel.edit.base_url')}
name='base_url'
placeholder={t('channel.edit.base_url_placeholder')}
@@ -637,12 +651,13 @@ const EditChannel = () => {
{inputs.type !== 3 &&
inputs.type !== 33 &&
inputs.type !== 8 &&
inputs.type !== 50 &&
inputs.type !== 22 && (
<Form.Field>
<Form.Input
-label={t('channel.edit.base_url')}
+label={t('channel.edit.proxy_url')}
name='base_url'
-placeholder={t('channel.edit.base_url_placeholder')}
+placeholder={t('channel.edit.proxy_url_placeholder')}
onChange={handleInputChange}
value={inputs.base_url}
autoComplete='new-password'