Compare commits

...

8 Commits

Author SHA1 Message Date
Deadwalk
8c01a989f1 Merge 290931b506 into 8df4a2670b 2025-09-28 08:48:30 +00:00
Deadwalk
290931b506 fix: improve Dockerfile build configuration for ARM64 compatibility
- Add comprehensive npm mirror configuration for faster package downloads
- Use parallel npm install and build processes for better performance
- Configure Go proxy and Alpine package mirrors for network reliability
- Fix frontend dependency issues that caused blank page display

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:48:24 +08:00
Deadwalk
48396d3f33 fix: fix security and code quality issues identified by CodeReview
- Fix a JSON injection vulnerability: use json.Marshal() to safely escape string content
- Define the constant CHARS_PER_TOKEN to replace the hard-coded token-estimation factor of 4
- Handle UnmarshalJSON errors instead of failing silently, and log the errors
- Define constants for the hard-coded API endpoint paths to improve maintainability

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:41:48 +08:00
Deadwalk
e27612a620 refactor: optimize Anthropic protocol log output
- Remove unnecessary debug logs to reduce noise in production
- Downgrade verbose logs from Infof to Debugf
- Keep critical error logs and key flow information
- Restructure logs for readability and maintainability

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:22:18 +08:00
Deadwalk
008ffe4662 feat: support Anthropic API protocol
- Add Anthropic adaptor implementation
- Support Anthropic message format conversion
- Add Vertex AI Claude adapter support
- Update relay mode definitions for Anthropic
- Add Anthropic controller and routing

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: zgdmemail@gmail.com
2025-09-28 11:13:55 +08:00
JustSong
8df4a2670b docs: update ByteDance Doubao model link in README
2025-02-21 19:30:16 +08:00
longkeyy
7ac553541b feat: update openrouter models and price 20250213 (#2084)
2025-02-16 18:01:59 +08:00
longkeyy
a5c517c27a feat: update ali models and price 20250213 (#2086) 2025-02-16 18:01:24 +08:00
16 changed files with 1117 additions and 127 deletions

View File

@@ -1,14 +1,28 @@
 FROM --platform=$BUILDPLATFORM node:16 AS builder
+# Configure npm to use the npmmirror (Taobao) registry
+RUN npm config set registry https://registry.npmmirror.com && \
+    npm config set disturl https://npmmirror.com/dist && \
+    npm config set sass_binary_site https://npmmirror.com/mirrors/node-sass/ && \
+    npm config set electron_mirror https://npmmirror.com/mirrors/electron/ && \
+    npm config set puppeteer_download_host https://npmmirror.com/mirrors && \
+    npm config set chromedriver_cdnurl https://npmmirror.com/mirrors/chromedriver && \
+    npm config set operadriver_cdnurl https://npmmirror.com/mirrors/operadriver && \
+    npm config set phantomjs_cdnurl https://npmmirror.com/mirrors/phantomjs && \
+    npm config set selenium_cdnurl https://npmmirror.com/mirrors/selenium && \
+    npm config set node_inspector_cdnurl https://npmmirror.com/mirrors/node-inspector
 WORKDIR /web
 COPY ./VERSION .
 COPY ./web .
-RUN npm install --prefix /web/default & \
-    npm install --prefix /web/berry & \
-    npm install --prefix /web/air & \
+# Install npm dependencies in parallel to speed up the build
+RUN npm install --prefix /web/default --prefer-offline --no-audit & \
+    npm install --prefix /web/berry --prefer-offline --no-audit & \
+    npm install --prefix /web/air --prefer-offline --no-audit & \
     wait
+# Build the frontend projects in parallel to speed up the build
 RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/default & \
     DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/berry & \
     DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/air & \
@@ -16,16 +30,22 @@ RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run buil
 FROM golang:alpine AS builder2
-RUN apk add --no-cache \
+# Configure Go to use a China-local module proxy
+ENV GOPROXY=https://goproxy.cn,direct \
+    GOSUMDB=sum.golang.google.cn \
+    GO111MODULE=on \
+    CGO_ENABLED=1 \
+    GOOS=linux
+# Use the Aliyun Alpine mirror to speed up apk package installation
+RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
+    apk update && \
+    apk add --no-cache \
     gcc \
     musl-dev \
     sqlite-dev \
     build-base
-ENV GO111MODULE=on \
-    CGO_ENABLED=1 \
-    GOOS=linux
 WORKDIR /build
 ADD go.mod go.sum ./
@@ -38,7 +58,10 @@ RUN go build -trimpath -ldflags "-s -w -X 'github.com/songquanpeng/one-api/commo
 FROM alpine:latest
-RUN apk add --no-cache ca-certificates tzdata
+# Use the Aliyun Alpine mirror to speed up apk package installation
+RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
+    apk update && \
+    apk add --no-cache ca-certificates tzdata
 COPY --from=builder2 /build/one-api /

View File

@@ -72,7 +72,7 @@ _✨ Access all major models through the standard OpenAI API format, out of the box
 + [x] [Anthropic Claude series models](https://anthropic.com) (AWS Claude supported)
 + [x] [Google PaLM2/Gemini series models](https://developers.generativeai.google)
 + [x] [Mistral series models](https://mistral.ai/)
-+ [x] [ByteDance Doubao large models](https://console.volcengine.com/ark/region:ark+cn-beijing/model)
++ [x] [ByteDance Doubao large models (Volcengine)](https://www.volcengine.com/experience/ark?utm_term=202502dsinvite&ac=DSASUQY5&rc=2QXCA1VI)
 + [x] [Baidu Wenxin Yiyan series models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
 + [x] [Alibaba Tongyi Qianwen series models](https://help.aliyun.com/document_detail/2400395.html)
 + [x] [iFlytek Spark series models](https://www.xfyun.cn/doc/spark/Web.html)

View File

@@ -36,6 +36,8 @@ func relayHelper(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
 		err = controller.RelayAudioHelper(c, relayMode)
 	case relaymode.Proxy:
 		err = controller.RelayProxyHelper(c, relayMode)
+	case relaymode.AnthropicMessages:
+		err = controller.RelayAnthropicHelper(c)
 	default:
 		err = controller.RelayTextHelper(c)
 	}

View File

@@ -1,9 +1,9 @@
-version: '3.4'
 services:
   one-api:
-    image: "${REGISTRY:-docker.io}/justsong/one-api:latest"
+    # image: "${REGISTRY:-docker.io}/justsong/one-api:latest"
+    build: .
     container_name: one-api
+    platform: linux/amd64
     restart: always
     command: --log-dir /app/logs
     ports:

View File

@@ -14,10 +14,14 @@ var ModelList = []string{
 	"qwen2-72b-instruct", "qwen2-57b-a14b-instruct", "qwen2-7b-instruct", "qwen2-1.5b-instruct", "qwen2-0.5b-instruct",
 	"qwen1.5-110b-chat", "qwen1.5-72b-chat", "qwen1.5-32b-chat", "qwen1.5-14b-chat", "qwen1.5-7b-chat", "qwen1.5-1.8b-chat", "qwen1.5-0.5b-chat",
 	"qwen-72b-chat", "qwen-14b-chat", "qwen-7b-chat", "qwen-1.8b-chat", "qwen-1.8b-longcontext-chat",
+	"qvq-72b-preview",
+	"qwen2.5-vl-72b-instruct", "qwen2.5-vl-7b-instruct", "qwen2.5-vl-2b-instruct", "qwen2.5-vl-1b-instruct", "qwen2.5-vl-0.5b-instruct",
 	"qwen2-vl-7b-instruct", "qwen2-vl-2b-instruct", "qwen-vl-v1", "qwen-vl-chat-v1",
 	"qwen2-audio-instruct", "qwen-audio-chat",
 	"qwen2.5-math-72b-instruct", "qwen2.5-math-7b-instruct", "qwen2.5-math-1.5b-instruct", "qwen2-math-72b-instruct", "qwen2-math-7b-instruct", "qwen2-math-1.5b-instruct",
 	"qwen2.5-coder-32b-instruct", "qwen2.5-coder-14b-instruct", "qwen2.5-coder-7b-instruct", "qwen2.5-coder-3b-instruct", "qwen2.5-coder-1.5b-instruct", "qwen2.5-coder-0.5b-instruct",
 	"text-embedding-v1", "text-embedding-v3", "text-embedding-v2", "text-embedding-async-v2", "text-embedding-async-v1",
 	"ali-stable-diffusion-xl", "ali-stable-diffusion-v1.5", "wanx-v1",
+	"qwen-mt-plus", "qwen-mt-turbo",
+	"deepseek-r1", "deepseek-v3", "deepseek-r1-distill-qwen-1.5b", "deepseek-r1-distill-qwen-7b", "deepseek-r1-distill-qwen-14b", "deepseek-r1-distill-qwen-32b", "deepseek-r1-distill-llama-8b", "deepseek-r1-distill-llama-70b",
 }

View File

@@ -11,6 +11,14 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/relaymode"
 )

+const (
+	// NativeAnthropicEndpoint is the endpoint for native Anthropic API
+	NativeAnthropicEndpoint = "/v1/messages"
+	// ThirdPartyAnthropicEndpoint is the endpoint for third-party providers supporting Anthropic protocol
+	ThirdPartyAnthropicEndpoint = "/anthropic/v1/messages"
+)
+
 type Adaptor struct {
@@ -21,7 +29,15 @@
 }

 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	return fmt.Sprintf("%s/v1/messages", meta.BaseURL), nil
+	// For native Anthropic API
+	if strings.Contains(meta.BaseURL, "api.anthropic.com") {
+		return fmt.Sprintf("%s%s", meta.BaseURL, NativeAnthropicEndpoint), nil
+	}
+	// For third-party providers supporting Anthropic protocol (like DeepSeek)
+	// They typically expose the endpoint at /anthropic/v1/messages
+	baseURL := strings.TrimSuffix(meta.BaseURL, "/")
+	return fmt.Sprintf("%s%s", baseURL, ThirdPartyAnthropicEndpoint), nil
 }

 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
@@ -47,6 +63,15 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
+	// For native Anthropic protocol requests, return the request as-is (no conversion needed)
+	if relayMode == relaymode.AnthropicMessages {
+		// The request should already be in Anthropic format, so we pass it through
+		// This will be handled by the caller, which already has the Anthropic request
+		return request, nil
+	}
+	// For OpenAI to Anthropic conversion (existing functionality)
 	return ConvertRequest(*request), nil
 }
@@ -62,6 +87,17 @@ func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Read
 }

 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	// For native Anthropic protocol requests, handle the response directly without conversion
+	if meta.Mode == relaymode.AnthropicMessages {
+		if meta.IsStream {
+			err, usage = DirectStreamHandler(c, resp)
+		} else {
+			err, usage = DirectHandler(c, resp, meta.PromptTokens, meta.ActualModelName)
+		}
+		return
+	}
+	// For OpenAI to Anthropic conversion (existing functionality)
 	if meta.IsStream {
 		err, usage = StreamHandler(c, resp)
 	} else {
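
A minimal standalone sketch of the URL-routing rule introduced above, runnable on its own (the example base URLs are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"strings"
)

// Sketch of the rule: base URLs containing "api.anthropic.com" get the native
// /v1/messages path; any other provider is assumed to expose the Anthropic
// protocol at /anthropic/v1/messages.
func requestURL(baseURL string) string {
	if strings.Contains(baseURL, "api.anthropic.com") {
		return baseURL + "/v1/messages"
	}
	return strings.TrimSuffix(baseURL, "/") + "/anthropic/v1/messages"
}

func main() {
	fmt.Println(requestURL("https://api.anthropic.com")) // native endpoint
	fmt.Println(requestURL("https://api.deepseek.com/")) // third-party endpoint
}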

View File

@@ -4,11 +4,12 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
-	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"
 	"strings"

+	"github.com/songquanpeng/one-api/common/render"
+
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -89,8 +90,18 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 		claudeRequest.Model = "claude-2.1"
 	}
 	for _, message := range textRequest.Messages {
-		if message.Role == "system" && claudeRequest.System == "" {
-			claudeRequest.System = message.StringContent()
+		if message.Role == "system" && claudeRequest.System.IsEmpty() {
+			// Create a SystemPrompt from the string content
+			systemPrompt := SystemPrompt{}
+			systemData, err := json.Marshal(message.StringContent()) // Safely escape string for JSON
+			if err != nil {
+				logger.SysError(fmt.Sprintf("Failed to marshal system prompt: %v", err))
+			} else {
+				if err := systemPrompt.UnmarshalJSON(systemData); err != nil {
+					logger.SysError(fmt.Sprintf("Failed to unmarshal system prompt: %v", err))
+				}
+				claudeRequest.System = systemPrompt
+			}
 			continue
 		}
 		claudeMessage := Message{
@@ -377,3 +388,128 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
 	_, err = c.Writer.Write(jsonResponse)
 	return nil, &usage
 }
+
+// DirectHandler handles native Anthropic API responses without conversion to OpenAI format
+func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
+	ctx := c.Request.Context()
+	logger.Debugf(ctx, "DirectHandler - Response status: %d", resp.StatusCode)
+
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to read response body: %s", err.Error())
+		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		logger.Errorf(ctx, "Failed to close response body: %s", err.Error())
+		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
+	logger.Debugf(ctx, "Raw response body: %s", string(responseBody))
+
+	var claudeResponse Response
+	err = json.Unmarshal(responseBody, &claudeResponse)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to unmarshal response: %s", err.Error())
+		// If we can't parse it as an Anthropic response, it may be an error response;
+		// try to write it through directly.
+		c.Writer.Header().Set("Content-Type", "application/json")
+		c.Writer.WriteHeader(resp.StatusCode)
+		_, writeErr := c.Writer.Write(responseBody)
+		if writeErr != nil {
+			logger.Errorf(ctx, "Failed to write raw response: %s", writeErr.Error())
+			return openai.ErrorWrapper(writeErr, "write_response_failed", http.StatusInternalServerError), nil
+		}
+		// Return a minimal usage for tracking
+		usage := &model.Usage{PromptTokens: promptTokens, CompletionTokens: 0, TotalTokens: promptTokens}
+		return nil, usage
+	}
+
+	logger.Debugf(ctx, "Parsed response - ID: %s, Model: %s, Usage: %+v",
+		claudeResponse.Id, claudeResponse.Model, claudeResponse.Usage)
+
+	if claudeResponse.Error.Type != "" {
+		logger.Errorf(ctx, "Anthropic API error: %s - %s", claudeResponse.Error.Type, claudeResponse.Error.Message)
+		return &model.ErrorWithStatusCode{
+			Error: model.Error{
+				Message: claudeResponse.Error.Message,
+				Type:    claudeResponse.Error.Type,
+				Param:   "",
+				Code:    claudeResponse.Error.Type,
+			},
+			StatusCode: resp.StatusCode,
+		}, nil
+	}
+
+	// For direct mode, return the response as-is without conversion
+	usage := model.Usage{
+		PromptTokens:     claudeResponse.Usage.InputTokens,
+		CompletionTokens: claudeResponse.Usage.OutputTokens,
+		TotalTokens:      claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens,
+	}
+	logger.Debugf(ctx, "Usage calculated: %+v", usage)
+
+	// Write the original Anthropic response directly
+	c.Writer.Header().Set("Content-Type", "application/json")
+	c.Writer.WriteHeader(resp.StatusCode)
+	_, err = c.Writer.Write(responseBody)
+	if err != nil {
+		logger.Errorf(ctx, "Failed to write response: %s", err.Error())
+		return openai.ErrorWrapper(err, "write_response_failed", http.StatusInternalServerError), nil
+	}
+	logger.Debugf(ctx, "Response written successfully")
+	return nil, &usage
+}
+
+// DirectStreamHandler handles native Anthropic API streaming responses without conversion
+func DirectStreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+	defer resp.Body.Close()
+
+	// Set headers for streaming
+	c.Writer.Header().Set("Content-Type", "text/event-stream")
+	c.Writer.Header().Set("Cache-Control", "no-cache")
+	c.Writer.Header().Set("Connection", "keep-alive")
+	c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
+	c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type")
+	c.Writer.WriteHeader(resp.StatusCode)
+
+	// Stream the response directly without conversion
+	var usage model.Usage
+	scanner := bufio.NewScanner(resp.Body)
+	for scanner.Scan() {
+		data := scanner.Text()
+		if len(data) < 6 || !strings.HasPrefix(data, "data:") {
+			continue
+		}
+		// Parse usage information if available
+		if strings.Contains(data, "\"usage\":") {
+			var eventData map[string]interface{}
+			jsonData := strings.TrimPrefix(data, "data:")
+			jsonData = strings.TrimSpace(jsonData)
+			if err := json.Unmarshal([]byte(jsonData), &eventData); err == nil {
+				if usageData, ok := eventData["usage"].(map[string]interface{}); ok {
+					if inputTokens, ok := usageData["input_tokens"].(float64); ok {
+						usage.PromptTokens = int(inputTokens)
+					}
+					if outputTokens, ok := usageData["output_tokens"].(float64); ok {
+						usage.CompletionTokens = int(outputTokens)
+						usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+					}
+				}
+			}
+		}
+		// Write data directly to the response
+		c.Writer.WriteString(data + "\n")
+		c.Writer.Flush()
+	}
+	if err := scanner.Err(); err != nil {
+		return openai.ErrorWrapper(err, "stream_read_failed", http.StatusInternalServerError), nil
+	}
+	return nil, &usage
+}
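
For reference, a minimal standalone sketch of the usage extraction DirectStreamHandler performs on SSE lines (the sample event below is illustrative, not taken from the diff):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Anthropic streams emit events such as message_delta whose payload carries a
// "usage" object; the handler above trims the "data:" prefix and pulls the
// token counts out of that object.
func main() {
	line := `data: {"type":"message_delta","usage":{"input_tokens":12,"output_tokens":34}}`
	payload := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
	var event map[string]interface{}
	if err := json.Unmarshal([]byte(payload), &event); err != nil {
		panic(err)
	}
	if usage, ok := event["usage"].(map[string]interface{}); ok {
		fmt.Println(usage["input_tokens"], usage["output_tokens"]) // 12 34
	}
}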

View File

@@ -1,5 +1,10 @@
 package anthropic

+import (
+	"encoding/json"
+	"fmt"
+)
+
 // https://docs.anthropic.com/claude/reference/messages_post

 type Metadata struct {
@@ -41,18 +46,92 @@ type InputSchema struct {
 	Required any `json:"required,omitempty"`
 }

+// SystemPrompt can handle both string and array formats for the system field
+type SystemPrompt struct {
+	value interface{}
+}
+
+// UnmarshalJSON implements json.Unmarshaler to handle both string and array formats
+func (s *SystemPrompt) UnmarshalJSON(data []byte) error {
+	// Try to unmarshal as a string first
+	var str string
+	if err := json.Unmarshal(data, &str); err == nil {
+		s.value = str
+		return nil
+	}
+	// If that fails, try to unmarshal as an array
+	var arr []interface{}
+	if err := json.Unmarshal(data, &arr); err == nil {
+		s.value = arr
+		return nil
+	}
+	return fmt.Errorf("system field must be either a string or an array")
+}
+
+// MarshalJSON implements json.Marshaler
+func (s SystemPrompt) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.value)
+}
+
+// String returns the system prompt as a string
+func (s SystemPrompt) String() string {
+	if s.value == nil {
+		return ""
+	}
+	switch v := s.value.(type) {
+	case string:
+		return v
+	case []interface{}:
+		// Convert the array to a string by concatenating text content
+		var result string
+		for _, item := range v {
+			if itemMap, ok := item.(map[string]interface{}); ok {
+				if text, exists := itemMap["text"]; exists {
+					if textStr, ok := text.(string); ok {
+						result += textStr + " "
+					}
+				}
+			} else if str, ok := item.(string); ok {
+				result += str + " "
+			}
+		}
+		return result
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+// IsEmpty returns true if the system prompt is empty
+func (s SystemPrompt) IsEmpty() bool {
+	if s.value == nil {
+		return true
+	}
+	switch v := s.value.(type) {
+	case string:
+		return v == ""
+	case []interface{}:
+		return len(v) == 0
+	default:
+		return false
+	}
+}
+
 type Request struct {
 	Model    string    `json:"model"`
 	Messages []Message `json:"messages"`
-	System        string       `json:"system,omitempty"`
+	System        SystemPrompt `json:"system,omitempty"`
 	MaxTokens     int          `json:"max_tokens,omitempty"`
 	StopSequences []string     `json:"stop_sequences,omitempty"`
 	Stream        bool         `json:"stream,omitempty"`
 	Temperature   *float64     `json:"temperature,omitempty"`
 	TopP          *float64     `json:"top_p,omitempty"`
 	TopK          int          `json:"top_k,omitempty"`
 	Tools         []Tool       `json:"tools,omitempty"`
 	ToolChoice    any          `json:"tool_choice,omitempty"`
 	//Metadata    `json:"metadata,omitempty"`
 }
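
A standalone sketch of the try-string-then-array decoding strategy SystemPrompt uses above (sample inputs are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// systemPrompt mirrors the technique: attempt to decode the field as a plain
// string; if that fails, fall back to an array of content blocks.
type systemPrompt struct{ value interface{} }

func (s *systemPrompt) UnmarshalJSON(data []byte) error {
	var str string
	if err := json.Unmarshal(data, &str); err == nil {
		s.value = str
		return nil
	}
	var arr []interface{}
	if err := json.Unmarshal(data, &arr); err == nil {
		s.value = arr
		return nil
	}
	return fmt.Errorf("system field must be either a string or an array")
}

func main() {
	var a, b systemPrompt
	_ = json.Unmarshal([]byte(`"You are terse."`), &a)
	_ = json.Unmarshal([]byte(`[{"type":"text","text":"You are terse."}]`), &b)
	fmt.Printf("%T %T\n", a.value, b.value) // string []interface {}
}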

View File

@@ -1,20 +1,235 @@
 package openrouter

 var ModelList = []string{
-	"openai/gpt-3.5-turbo",
-	"openai/chatgpt-4o-latest",
-	"openai/o1",
-	"openai/o1-preview",
-	"openai/o1-mini",
-	"openai/o3-mini",
-	"google/gemini-2.0-flash-001",
-	"google/gemini-2.0-flash-thinking-exp:free",
-	"google/gemini-2.0-flash-lite-preview-02-05:free",
-	"google/gemini-2.0-pro-exp-02-05:free",
-	"google/gemini-flash-1.5-8b",
-	"anthropic/claude-3.5-sonnet",
-	"anthropic/claude-3.5-haiku",
-	"deepseek/deepseek-r1:free",
-	"deepseek/deepseek-r1",
-	"qwen/qwen-vl-plus:free",
+	"01-ai/yi-large",
+	"aetherwiing/mn-starcannon-12b",
+	"ai21/jamba-1-5-large",
+	"ai21/jamba-1-5-mini",
+	"ai21/jamba-instruct",
+	"aion-labs/aion-1.0",
+	"aion-labs/aion-1.0-mini",
+	"aion-labs/aion-rp-llama-3.1-8b",
+	"allenai/llama-3.1-tulu-3-405b",
+	"alpindale/goliath-120b",
+	"alpindale/magnum-72b",
+	"amazon/nova-lite-v1",
+	"amazon/nova-micro-v1",
+	"amazon/nova-pro-v1",
+	"anthracite-org/magnum-v2-72b",
+	"anthracite-org/magnum-v4-72b",
+	"anthropic/claude-2",
+	"anthropic/claude-2.0",
+	"anthropic/claude-2.0:beta",
+	"anthropic/claude-2.1",
+	"anthropic/claude-2.1:beta",
+	"anthropic/claude-2:beta",
+	"anthropic/claude-3-haiku",
+	"anthropic/claude-3-haiku:beta",
+	"anthropic/claude-3-opus",
+	"anthropic/claude-3-opus:beta",
+	"anthropic/claude-3-sonnet",
+	"anthropic/claude-3-sonnet:beta",
+	"anthropic/claude-3.5-haiku",
+	"anthropic/claude-3.5-haiku-20241022",
+	"anthropic/claude-3.5-haiku-20241022:beta",
+	"anthropic/claude-3.5-haiku:beta",
+	"anthropic/claude-3.5-sonnet",
+	"anthropic/claude-3.5-sonnet-20240620",
+	"anthropic/claude-3.5-sonnet-20240620:beta",
+	"anthropic/claude-3.5-sonnet:beta",
+	"cognitivecomputations/dolphin-mixtral-8x22b",
+	"cognitivecomputations/dolphin-mixtral-8x7b",
+	"cohere/command",
+	"cohere/command-r",
+	"cohere/command-r-03-2024",
+	"cohere/command-r-08-2024",
+	"cohere/command-r-plus",
+	"cohere/command-r-plus-04-2024",
+	"cohere/command-r-plus-08-2024",
+	"cohere/command-r7b-12-2024",
+	"databricks/dbrx-instruct",
+	"deepseek/deepseek-chat",
+	"deepseek/deepseek-chat-v2.5",
+	"deepseek/deepseek-chat:free",
+	"deepseek/deepseek-r1",
+	"deepseek/deepseek-r1-distill-llama-70b",
+	"deepseek/deepseek-r1-distill-llama-70b:free",
+	"deepseek/deepseek-r1-distill-llama-8b",
+	"deepseek/deepseek-r1-distill-qwen-1.5b",
+	"deepseek/deepseek-r1-distill-qwen-14b",
+	"deepseek/deepseek-r1-distill-qwen-32b",
+	"deepseek/deepseek-r1:free",
+	"eva-unit-01/eva-llama-3.33-70b",
+	"eva-unit-01/eva-qwen-2.5-32b",
+	"eva-unit-01/eva-qwen-2.5-72b",
+	"google/gemini-2.0-flash-001",
+	"google/gemini-2.0-flash-exp:free",
+	"google/gemini-2.0-flash-lite-preview-02-05:free",
+	"google/gemini-2.0-flash-thinking-exp-1219:free",
+	"google/gemini-2.0-flash-thinking-exp:free",
+	"google/gemini-2.0-pro-exp-02-05:free",
+	"google/gemini-exp-1206:free",
+	"google/gemini-flash-1.5",
+	"google/gemini-flash-1.5-8b",
+	"google/gemini-flash-1.5-8b-exp",
+	"google/gemini-pro",
+	"google/gemini-pro-1.5",
+	"google/gemini-pro-vision",
+	"google/gemma-2-27b-it",
+	"google/gemma-2-9b-it",
+	"google/gemma-2-9b-it:free",
+	"google/gemma-7b-it",
+	"google/learnlm-1.5-pro-experimental:free",
+	"google/palm-2-chat-bison",
+	"google/palm-2-chat-bison-32k",
+	"google/palm-2-codechat-bison",
+	"google/palm-2-codechat-bison-32k",
+	"gryphe/mythomax-l2-13b",
+	"gryphe/mythomax-l2-13b:free",
+	"huggingfaceh4/zephyr-7b-beta:free",
+	"infermatic/mn-inferor-12b",
+	"inflection/inflection-3-pi",
+	"inflection/inflection-3-productivity",
+	"jondurbin/airoboros-l2-70b",
+	"liquid/lfm-3b",
+	"liquid/lfm-40b",
+	"liquid/lfm-7b",
+	"mancer/weaver",
+	"meta-llama/llama-2-13b-chat",
+	"meta-llama/llama-2-70b-chat",
+	"meta-llama/llama-3-70b-instruct",
+	"meta-llama/llama-3-8b-instruct",
+	"meta-llama/llama-3-8b-instruct:free",
+	"meta-llama/llama-3.1-405b",
+	"meta-llama/llama-3.1-405b-instruct",
+	"meta-llama/llama-3.1-70b-instruct",
+	"meta-llama/llama-3.1-8b-instruct",
+	"meta-llama/llama-3.2-11b-vision-instruct",
+	"meta-llama/llama-3.2-11b-vision-instruct:free",
+	"meta-llama/llama-3.2-1b-instruct",
+	"meta-llama/llama-3.2-3b-instruct",
+	"meta-llama/llama-3.2-90b-vision-instruct",
+	"meta-llama/llama-3.3-70b-instruct",
+	"meta-llama/llama-3.3-70b-instruct:free",
+	"meta-llama/llama-guard-2-8b",
+	"microsoft/phi-3-medium-128k-instruct",
+	"microsoft/phi-3-medium-128k-instruct:free",
+	"microsoft/phi-3-mini-128k-instruct",
+	"microsoft/phi-3-mini-128k-instruct:free",
+	"microsoft/phi-3.5-mini-128k-instruct",
+	"microsoft/phi-4",
+	"microsoft/wizardlm-2-7b",
+	"microsoft/wizardlm-2-8x22b",
+	"minimax/minimax-01",
+	"mistralai/codestral-2501",
+	"mistralai/codestral-mamba",
+	"mistralai/ministral-3b",
+	"mistralai/ministral-8b",
+	"mistralai/mistral-7b-instruct",
+	"mistralai/mistral-7b-instruct-v0.1",
+	"mistralai/mistral-7b-instruct-v0.3",
+	"mistralai/mistral-7b-instruct:free",
+	"mistralai/mistral-large",
+	"mistralai/mistral-large-2407",
+	"mistralai/mistral-large-2411",
+	"mistralai/mistral-medium",
+	"mistralai/mistral-nemo",
+	"mistralai/mistral-nemo:free",
+	"mistralai/mistral-small",
+	"mistralai/mistral-small-24b-instruct-2501",
+	"mistralai/mistral-small-24b-instruct-2501:free",
+	"mistralai/mistral-tiny",
+	"mistralai/mixtral-8x22b-instruct",
+	"mistralai/mixtral-8x7b",
+	"mistralai/mixtral-8x7b-instruct",
+	"mistralai/pixtral-12b",
+	"mistralai/pixtral-large-2411",
+	"neversleep/llama-3-lumimaid-70b",
+	"neversleep/llama-3-lumimaid-8b",
+	"neversleep/llama-3-lumimaid-8b:extended",
+	"neversleep/llama-3.1-lumimaid-70b",
+	"neversleep/llama-3.1-lumimaid-8b",
+	"neversleep/noromaid-20b",
+	"nothingiisreal/mn-celeste-12b",
+	"nousresearch/hermes-2-pro-llama-3-8b",
+	"nousresearch/hermes-3-llama-3.1-405b",
+	"nousresearch/hermes-3-llama-3.1-70b",
+	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
+	"nousresearch/nous-hermes-llama2-13b",
+	"nvidia/llama-3.1-nemotron-70b-instruct",
+	"nvidia/llama-3.1-nemotron-70b-instruct:free",
+	"openai/chatgpt-4o-latest",
+	"openai/gpt-3.5-turbo",
+	"openai/gpt-3.5-turbo-0125",
+	"openai/gpt-3.5-turbo-0613",
+	"openai/gpt-3.5-turbo-1106",
+	"openai/gpt-3.5-turbo-16k",
+	"openai/gpt-3.5-turbo-instruct",
+	"openai/gpt-4",
+	"openai/gpt-4-0314",
+	"openai/gpt-4-1106-preview",
+	"openai/gpt-4-32k",
+	"openai/gpt-4-32k-0314",
+	"openai/gpt-4-turbo",
+	"openai/gpt-4-turbo-preview",
+	"openai/gpt-4o",
+	"openai/gpt-4o-2024-05-13",
+	"openai/gpt-4o-2024-08-06",
+	"openai/gpt-4o-2024-11-20",
+	"openai/gpt-4o-mini",
+	"openai/gpt-4o-mini-2024-07-18",
+	"openai/gpt-4o:extended",
+	"openai/o1",
+	"openai/o1-mini",
+	"openai/o1-mini-2024-09-12",
+	"openai/o1-preview",
+	"openai/o1-preview-2024-09-12",
+	"openai/o3-mini",
+	"openai/o3-mini-high",
+	"openchat/openchat-7b",
+	"openchat/openchat-7b:free",
+	"openrouter/auto",
+	"perplexity/llama-3.1-sonar-huge-128k-online",
+	"perplexity/llama-3.1-sonar-large-128k-chat",
+	"perplexity/llama-3.1-sonar-large-128k-online",
+	"perplexity/llama-3.1-sonar-small-128k-chat",
+	"perplexity/llama-3.1-sonar-small-128k-online",
+	"perplexity/sonar",
+	"perplexity/sonar-reasoning",
+	"pygmalionai/mythalion-13b",
+	"qwen/qvq-72b-preview",
+	"qwen/qwen-2-72b-instruct",
+	"qwen/qwen-2-7b-instruct",
+	"qwen/qwen-2-7b-instruct:free",
+	"qwen/qwen-2-vl-72b-instruct",
+	"qwen/qwen-2-vl-7b-instruct",
+	"qwen/qwen-2.5-72b-instruct",
+	"qwen/qwen-2.5-7b-instruct",
+	"qwen/qwen-2.5-coder-32b-instruct",
+	"qwen/qwen-max",
+	"qwen/qwen-plus",
+	"qwen/qwen-turbo",
+	"qwen/qwen-vl-plus:free",
+	"qwen/qwen2.5-vl-72b-instruct:free",
+	"qwen/qwq-32b-preview",
+	"raifle/sorcererlm-8x22b",
+	"sao10k/fimbulvetr-11b-v2",
+	"sao10k/l3-euryale-70b",
+	"sao10k/l3-lunaris-8b",
+	"sao10k/l3.1-70b-hanami-x1",
+	"sao10k/l3.1-euryale-70b",
+	"sao10k/l3.3-euryale-70b",
+	"sophosympatheia/midnight-rose-70b",
+	"sophosympatheia/rogue-rose-103b-v0.2:free",
+	"teknium/openhermes-2.5-mistral-7b",
+	"thedrummer/rocinante-12b",
+	"thedrummer/unslopnemo-12b",
+	"undi95/remm-slerp-l2-13b",
+	"undi95/toppy-m-7b",
+	"undi95/toppy-m-7b:free",
+	"x-ai/grok-2-1212",
+	"x-ai/grok-2-vision-1212",
+	"x-ai/grok-beta",
+	"x-ai/grok-vision-beta",
+	"xwin-lm/xwin-lm-70b",
 }

View File

@@ -36,7 +36,7 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		AnthropicVersion: anthropicVersion,
 		// Model:         claudeReq.Model,
 		Messages:    claudeReq.Messages,
-		System:      claudeReq.System,
+		System:      claudeReq.System.String(), // Convert SystemPrompt to string
 		MaxTokens:   claudeReq.MaxTokens,
 		Temperature: claudeReq.Temperature,
 		TopP:        claudeReq.TopP,

View File

@@ -59,6 +59,8 @@ var ModelRatio = map[string]float64{
 	"o1-preview-2024-09-12": 7.5,
 	"o1-mini":               1.5, // $3.00 / 1M input tokens
 	"o1-mini-2024-09-12":    1.5,
+	"o3-mini":               1.5, // $3.00 / 1M input tokens
+	"o3-mini-2025-01-31":    1.5,
 	"davinci-002":           1,   // $0.002 / 1K tokens
 	"babbage-002":           0.2, // $0.0004 / 1K tokens
 	"text-ada-001":          0.2,
@@ -159,91 +161,105 @@ var ModelRatio = map[string]float64{
 	"embedding-2": 0.0005 * RMB,
 	"embedding-3": 0.0005 * RMB,
 	// https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
-	"qwen-turbo": 1.4286, // ¥0.02 / 1k tokens
-	"qwen-turbo-latest": 1.4286,
-	"qwen-plus": 1.4286,
-	"qwen-plus-latest": 1.4286,
-	"qwen-max": 1.4286,
-	"qwen-max-latest": 1.4286,
-	"qwen-max-longcontext": 1.4286,
-	"qwen-vl-max": 1.4286,
-	"qwen-vl-max-latest": 1.4286,
-	"qwen-vl-plus": 1.4286,
-	"qwen-vl-plus-latest": 1.4286,
-	"qwen-vl-ocr": 1.4286,
-	"qwen-vl-ocr-latest": 1.4286,
-	"qwen-audio-turbo": 1.4286,
-	"qwen-math-plus": 1.4286,
-	"qwen-math-plus-latest": 1.4286,
-	"qwen-math-turbo": 1.4286,
-	"qwen-math-turbo-latest": 1.4286,
-	"qwen-coder-plus": 1.4286,
-	"qwen-coder-plus-latest": 1.4286,
-	"qwen-coder-turbo": 1.4286,
-	"qwen-coder-turbo-latest": 1.4286,
-	"qwq-32b-preview": 1.4286,
-	"qwen2.5-72b-instruct": 1.4286,
-	"qwen2.5-32b-instruct": 1.4286,
-	"qwen2.5-14b-instruct": 1.4286,
-	"qwen2.5-7b-instruct": 1.4286,
-	"qwen2.5-3b-instruct": 1.4286,
-	"qwen2.5-1.5b-instruct": 1.4286,
-	"qwen2.5-0.5b-instruct": 1.4286,
-	"qwen2-72b-instruct": 1.4286,
-	"qwen2-57b-a14b-instruct": 1.4286,
-	"qwen2-7b-instruct": 1.4286,
-	"qwen2-1.5b-instruct": 1.4286,
-	"qwen2-0.5b-instruct": 1.4286,
-	"qwen1.5-110b-chat": 1.4286,
-	"qwen1.5-72b-chat": 1.4286,
-	"qwen1.5-32b-chat": 1.4286,
-	"qwen1.5-14b-chat": 1.4286,
-	"qwen1.5-7b-chat": 1.4286,
-	"qwen1.5-1.8b-chat": 1.4286,
-	"qwen1.5-0.5b-chat": 1.4286,
-	"qwen-72b-chat": 1.4286,
-	"qwen-14b-chat": 1.4286,
-	"qwen-7b-chat": 1.4286,
-	"qwen-1.8b-chat": 1.4286,
-	"qwen-1.8b-longcontext-chat": 1.4286,
-	"qwen2-vl-7b-instruct": 1.4286,
-	"qwen2-vl-2b-instruct": 1.4286,
-	"qwen-vl-v1": 1.4286,
-	"qwen-vl-chat-v1": 1.4286,
-	"qwen2-audio-instruct": 1.4286,
-	"qwen-audio-chat": 1.4286,
-	"qwen2.5-math-72b-instruct": 1.4286,
-	"qwen2.5-math-7b-instruct": 1.4286,
-	"qwen2.5-math-1.5b-instruct": 1.4286,
-	"qwen2-math-72b-instruct": 1.4286,
-	"qwen2-math-7b-instruct": 1.4286,
-	"qwen2-math-1.5b-instruct": 1.4286,
-	"qwen2.5-coder-32b-instruct": 1.4286,
-	"qwen2.5-coder-14b-instruct": 1.4286,
-	"qwen2.5-coder-7b-instruct": 1.4286,
-	"qwen2.5-coder-3b-instruct": 1.4286,
-	"qwen2.5-coder-1.5b-instruct": 1.4286,
-	"qwen2.5-coder-0.5b-instruct": 1.4286,
-	"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
-	"text-embedding-v3": 0.05,
-	"text-embedding-v2": 0.05,
-	"text-embedding-async-v2": 0.05,
-	"text-embedding-async-v1": 0.05,
-	"ali-stable-diffusion-xl": 8.00,
-	"ali-stable-diffusion-v1.5": 8.00,
-	"wanx-v1": 8.00,
-	"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v3.1-128K": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v3.5-32K": 1.2858, // ¥0.018 / 1k tokens
-	"SparkDesk-v4.0": 1.2858, // ¥0.018 / 1k tokens
-	"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
-	"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
-	"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
-	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
+	"qwen-turbo": 0.0003 * RMB,
+	"qwen-turbo-latest": 0.0003 * RMB,
+	"qwen-plus": 0.0008 * RMB,
+	"qwen-plus-latest": 0.0008 * RMB,
+	"qwen-max": 0.0024 * RMB,
+	"qwen-max-latest": 0.0024 * RMB,
+	"qwen-max-longcontext": 0.0005 * RMB,
+	"qwen-vl-max": 0.003 * RMB,
+	"qwen-vl-max-latest": 0.003 * RMB,
+	"qwen-vl-plus": 0.0015 * RMB,
+	"qwen-vl-plus-latest": 0.0015 * RMB,
+	"qwen-vl-ocr": 0.005 * RMB,
+	"qwen-vl-ocr-latest": 0.005 * RMB,
+	"qwen-audio-turbo": 1.4286,
+	"qwen-math-plus": 0.004 * RMB,
+	"qwen-math-plus-latest": 0.004 * RMB,
+	"qwen-math-turbo": 0.002 * RMB,
+	"qwen-math-turbo-latest": 0.002 * RMB,
+	"qwen-coder-plus": 0.0035 * RMB,
+	"qwen-coder-plus-latest": 0.0035 * RMB,
+	"qwen-coder-turbo": 0.002 * RMB,
+	"qwen-coder-turbo-latest": 0.002 * RMB,
+	"qwen-mt-plus": 0.015 * RMB,
+	"qwen-mt-turbo": 0.001 * RMB,
+	"qwq-32b-preview": 0.002 * RMB,
+	"qwen2.5-72b-instruct": 0.004 * RMB,
+	"qwen2.5-32b-instruct": 0.03 * RMB,
+	"qwen2.5-14b-instruct": 0.001 * RMB,
+	"qwen2.5-7b-instruct": 0.0005 * RMB,
+	"qwen2.5-3b-instruct": 0.006 * RMB,
+	"qwen2.5-1.5b-instruct": 0.0003 * RMB,
+	"qwen2.5-0.5b-instruct": 0.0003 * RMB,
+	"qwen2-72b-instruct": 0.004 * RMB,
+	"qwen2-57b-a14b-instruct": 0.0035 * RMB,
+	"qwen2-7b-instruct": 0.001 * RMB,
+	"qwen2-1.5b-instruct": 0.001 * RMB,
+	"qwen2-0.5b-instruct": 0.001 * RMB,
+	"qwen1.5-110b-chat": 0.007 * RMB,
+	"qwen1.5-72b-chat": 0.005 * RMB,
+	"qwen1.5-32b-chat": 0.0035 * RMB,
+	"qwen1.5-14b-chat": 0.002 * RMB,
+	"qwen1.5-7b-chat": 0.001 * RMB,
+	"qwen1.5-1.8b-chat": 0.001 * RMB,
+	"qwen1.5-0.5b-chat": 0.001 * RMB,
+	"qwen-72b-chat": 0.02 * RMB,
+	"qwen-14b-chat": 0.008 * RMB,
+	"qwen-7b-chat": 0.006 * RMB,
+	"qwen-1.8b-chat": 0.006 * RMB,
+	"qwen-1.8b-longcontext-chat": 0.006 * RMB,
+	"qvq-72b-preview": 0.012 * RMB,
+	"qwen2.5-vl-72b-instruct": 0.016 * RMB,
+	"qwen2.5-vl-7b-instruct": 0.002 * RMB,
+	"qwen2.5-vl-3b-instruct": 0.0012 * RMB,
+	"qwen2-vl-7b-instruct": 0.016 * RMB,
+	"qwen2-vl-2b-instruct": 0.002 * RMB,
+	"qwen-vl-v1": 0.002 * RMB,
+	"qwen-vl-chat-v1": 0.002 * RMB,
+	"qwen2-audio-instruct": 0.002 * RMB,
+	"qwen-audio-chat": 0.002 * RMB,
+	"qwen2.5-math-72b-instruct": 0.004 * RMB,
+	"qwen2.5-math-7b-instruct": 0.001 * RMB,
+	"qwen2.5-math-1.5b-instruct": 0.001 * RMB,
+	"qwen2-math-72b-instruct": 0.004 * RMB,
+	"qwen2-math-7b-instruct": 0.001 * RMB,
+	"qwen2-math-1.5b-instruct": 0.001 * RMB,
+	"qwen2.5-coder-32b-instruct": 0.002 * RMB,
+	"qwen2.5-coder-14b-instruct": 0.002 * RMB,
+	"qwen2.5-coder-7b-instruct": 0.001 * RMB,
+	"qwen2.5-coder-3b-instruct": 0.001 * RMB,
+	"qwen2.5-coder-1.5b-instruct": 0.001 * RMB,
+	"qwen2.5-coder-0.5b-instruct": 0.001 * RMB,
+	"text-embedding-v1": 0.0007 * RMB, // ¥0.0007 / 1k tokens
+	"text-embedding-v3": 0.0007 * RMB,
+	"text-embedding-v2": 0.0007 * RMB,
+	"text-embedding-async-v2": 0.0007 * RMB,
+	"text-embedding-async-v1": 0.0007 * RMB,
+	"ali-stable-diffusion-xl": 8.00,
+	"ali-stable-diffusion-v1.5": 8.00,
+	"wanx-v1": 8.00,
+	"deepseek-r1": 0.002 * RMB,
+	"deepseek-v3": 0.001 * RMB,
+	"deepseek-r1-distill-qwen-1.5b": 0.001 * RMB,
+	"deepseek-r1-distill-qwen-7b": 0.0005 * RMB,
+	"deepseek-r1-distill-qwen-14b": 0.001 * RMB,
+	"deepseek-r1-distill-qwen-32b": 0.002 * RMB,
+	"deepseek-r1-distill-llama-8b": 0.0005 * RMB,
+	"deepseek-r1-distill-llama-70b": 0.004 * RMB,
+	"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.1-128K": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v3.5-32K": 1.2858, // ¥0.018 / 1k tokens
+	"SparkDesk-v4.0": 1.2858, // ¥0.018 / 1k tokens
+	"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
+	"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
+	"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
+	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
 	// https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
 	"hunyuan-turbo": 0.015 * RMB,
 	"hunyuan-large": 0.004 * RMB,
@@ -371,6 +387,238 @@ var ModelRatio = map[string]float64{
 	"mistralai/mistral-7b-instruct-v0.2":   0.050 * USD,
 	"mistralai/mistral-7b-v0.1":            0.050 * USD,
 	"mistralai/mixtral-8x7b-instruct-v0.1": 0.300 * USD,
+	// https://openrouter.ai/models
+	"01-ai/yi-large": 1.5,
+	"aetherwiing/mn-starcannon-12b": 0.6,
+	"ai21/jamba-1-5-large": 4.0,
+	"ai21/jamba-1-5-mini": 0.2,
+	"ai21/jamba-instruct": 0.35,
+	"aion-labs/aion-1.0": 6.0,
+	"aion-labs/aion-1.0-mini": 1.2,
+	"aion-labs/aion-rp-llama-3.1-8b": 0.1,
+	"allenai/llama-3.1-tulu-3-405b": 5.0,
+	"alpindale/goliath-120b": 4.6875,
+	"alpindale/magnum-72b": 1.125,
+	"amazon/nova-lite-v1": 0.12,
+	"amazon/nova-micro-v1": 0.07,
+	"amazon/nova-pro-v1": 1.6,
+	"anthracite-org/magnum-v2-72b": 1.5,
+	"anthracite-org/magnum-v4-72b": 1.125,
+	"anthropic/claude-2": 12.0,
+	"anthropic/claude-2.0": 12.0,
+	"anthropic/claude-2.0:beta": 12.0,
+	"anthropic/claude-2.1": 12.0,
+	"anthropic/claude-2.1:beta": 12.0,
+	"anthropic/claude-2:beta": 12.0,
+	"anthropic/claude-3-haiku": 0.625,
+	"anthropic/claude-3-haiku:beta": 0.625,
+	"anthropic/claude-3-opus": 37.5,
+	"anthropic/claude-3-opus:beta": 37.5,
+	"anthropic/claude-3-sonnet": 7.5,
+	"anthropic/claude-3-sonnet:beta": 7.5,
+	"anthropic/claude-3.5-haiku": 2.0,
+	"anthropic/claude-3.5-haiku-20241022": 2.0,
+	"anthropic/claude-3.5-haiku-20241022:beta": 2.0,
+	"anthropic/claude-3.5-haiku:beta": 2.0,
+	"anthropic/claude-3.5-sonnet": 7.5,
+	"anthropic/claude-3.5-sonnet-20240620": 7.5,
+	"anthropic/claude-3.5-sonnet-20240620:beta": 7.5,
+	"anthropic/claude-3.5-sonnet:beta": 7.5,
+	"cognitivecomputations/dolphin-mixtral-8x22b": 0.45,
+	"cognitivecomputations/dolphin-mixtral-8x7b": 0.25,
+	"cohere/command": 0.95,
+	"cohere/command-r": 0.7125,
+	"cohere/command-r-03-2024": 0.7125,
+	"cohere/command-r-08-2024": 0.285,
+	"cohere/command-r-plus": 7.125,
+	"cohere/command-r-plus-04-2024": 7.125,
+	"cohere/command-r-plus-08-2024": 4.75,
+	"cohere/command-r7b-12-2024": 0.075,
+	"databricks/dbrx-instruct": 0.6,
+	"deepseek/deepseek-chat": 0.445,
+	"deepseek/deepseek-chat-v2.5": 1.0,
+	"deepseek/deepseek-chat:free": 0.0,
+	"deepseek/deepseek-r1": 1.2,
+	"deepseek/deepseek-r1-distill-llama-70b": 0.345,
+	"deepseek/deepseek-r1-distill-llama-70b:free": 0.0,
+	"deepseek/deepseek-r1-distill-llama-8b": 0.02,
+	"deepseek/deepseek-r1-distill-qwen-1.5b": 0.09,
+	"deepseek/deepseek-r1-distill-qwen-14b": 0.075,
+	"deepseek/deepseek-r1-distill-qwen-32b": 0.09,
+	"deepseek/deepseek-r1:free": 0.0,
+	"eva-unit-01/eva-llama-3.33-70b": 3.0,
+	"eva-unit-01/eva-qwen-2.5-32b": 1.7,
+	"eva-unit-01/eva-qwen-2.5-72b": 3.0,
+	"google/gemini-2.0-flash-001": 0.2,
+	"google/gemini-2.0-flash-exp:free": 0.0,
+	"google/gemini-2.0-flash-lite-preview-02-05:free": 0.0,
+	"google/gemini-2.0-flash-thinking-exp-1219:free": 0.0,
+	"google/gemini-2.0-flash-thinking-exp:free": 0.0,
+	"google/gemini-2.0-pro-exp-02-05:free": 0.0,
+	"google/gemini-exp-1206:free": 0.0,
+	"google/gemini-flash-1.5": 0.15,
+	"google/gemini-flash-1.5-8b": 0.075,
+	"google/gemini-flash-1.5-8b-exp": 0.0,
+	"google/gemini-pro": 0.75,
+	"google/gemini-pro-1.5": 2.5,
+	"google/gemini-pro-vision": 0.75,
+	"google/gemma-2-27b-it": 0.135,
+	"google/gemma-2-9b-it": 0.03,
+	"google/gemma-2-9b-it:free": 0.0,
+	"google/gemma-7b-it": 0.075,
+	"google/learnlm-1.5-pro-experimental:free": 0.0,
+	"google/palm-2-chat-bison": 1.0,
+	"google/palm-2-chat-bison-32k": 1.0,
+	"google/palm-2-codechat-bison": 1.0,
+	"google/palm-2-codechat-bison-32k": 1.0,
+	"gryphe/mythomax-l2-13b": 0.0325,
+	"gryphe/mythomax-l2-13b:free": 0.0,
+	"huggingfaceh4/zephyr-7b-beta:free": 0.0,
+	"infermatic/mn-inferor-12b": 0.6,
+	"inflection/inflection-3-pi": 5.0,
+	"inflection/inflection-3-productivity": 5.0,
+	"jondurbin/airoboros-l2-70b": 0.25,
+	"liquid/lfm-3b": 0.01,
+	"liquid/lfm-40b": 0.075,
+	"liquid/lfm-7b": 0.005,
+	"mancer/weaver": 1.125,
+	"meta-llama/llama-2-13b-chat": 0.11,
+	"meta-llama/llama-2-70b-chat": 0.45,
+	"meta-llama/llama-3-70b-instruct": 0.2,
+	"meta-llama/llama-3-8b-instruct": 0.03,
+	"meta-llama/llama-3-8b-instruct:free": 0.0,
+	"meta-llama/llama-3.1-405b": 1.0,
+	"meta-llama/llama-3.1-405b-instruct": 0.4,
+	"meta-llama/llama-3.1-70b-instruct": 0.15,
+	"meta-llama/llama-3.1-8b-instruct": 0.025,
+	"meta-llama/llama-3.2-11b-vision-instruct": 0.0275,
+	"meta-llama/llama-3.2-11b-vision-instruct:free": 0.0,
+	"meta-llama/llama-3.2-1b-instruct": 0.005,
+	"meta-llama/llama-3.2-3b-instruct": 0.0125,
+	"meta-llama/llama-3.2-90b-vision-instruct": 0.8,
+	"meta-llama/llama-3.3-70b-instruct": 0.15,
+	"meta-llama/llama-3.3-70b-instruct:free": 0.0,
+	"meta-llama/llama-guard-2-8b": 0.1,
+	"microsoft/phi-3-medium-128k-instruct": 0.5,
+	"microsoft/phi-3-medium-128k-instruct:free": 0.0,
+	"microsoft/phi-3-mini-128k-instruct": 0.05,
+	"microsoft/phi-3-mini-128k-instruct:free": 0.0,
+	"microsoft/phi-3.5-mini-128k-instruct": 0.05,
+	"microsoft/phi-4": 0.07,
+	"microsoft/wizardlm-2-7b": 0.035,
+	"microsoft/wizardlm-2-8x22b": 0.25,
+	"minimax/minimax-01": 0.55,
+	"mistralai/codestral-2501": 0.45,
+	"mistralai/codestral-mamba": 0.125,
+	"mistralai/ministral-3b": 0.02,
+	"mistralai/ministral-8b": 0.05,
+	"mistralai/mistral-7b-instruct": 0.0275,
+	"mistralai/mistral-7b-instruct-v0.1": 0.1,
+	"mistralai/mistral-7b-instruct-v0.3": 0.0275,
+	"mistralai/mistral-7b-instruct:free": 0.0,
+	"mistralai/mistral-large": 3.0,
+	"mistralai/mistral-large-2407": 3.0,
+	"mistralai/mistral-large-2411": 3.0,
+	"mistralai/mistral-medium": 4.05,
+	"mistralai/mistral-nemo": 0.04,
+	"mistralai/mistral-nemo:free": 0.0,
+	"mistralai/mistral-small": 0.3,
+	"mistralai/mistral-small-24b-instruct-2501": 0.07,
+	"mistralai/mistral-small-24b-instruct-2501:free": 0.0,
+	"mistralai/mistral-tiny": 0.125,
+	"mistralai/mixtral-8x22b-instruct": 0.45,
+	"mistralai/mixtral-8x7b": 0.3,
+	"mistralai/mixtral-8x7b-instruct": 0.12,
+	"mistralai/pixtral-12b": 0.05,
+	"mistralai/pixtral-large-2411": 3.0,
+	"neversleep/llama-3-lumimaid-70b": 2.25,
+	"neversleep/llama-3-lumimaid-8b": 0.5625,
+	"neversleep/llama-3-lumimaid-8b:extended": 0.5625,
+	"neversleep/llama-3.1-lumimaid-70b": 2.25,
+	"neversleep/llama-3.1-lumimaid-8b": 0.5625,
+	"neversleep/noromaid-20b": 1.125,
+	"nothingiisreal/mn-celeste-12b": 0.6,
+	"nousresearch/hermes-2-pro-llama-3-8b": 0.02,
+	"nousresearch/hermes-3-llama-3.1-405b": 0.4,
+	"nousresearch/hermes-3-llama-3.1-70b": 0.15,
+	"nousresearch/nous-hermes-2-mixtral-8x7b-dpo": 0.3,
+	"nousresearch/nous-hermes-llama2-13b": 0.085,
+	"nvidia/llama-3.1-nemotron-70b-instruct": 0.15,
+	"nvidia/llama-3.1-nemotron-70b-instruct:free": 0.0,
+	"openai/chatgpt-4o-latest": 7.5,
+	"openai/gpt-3.5-turbo": 0.75,
+	"openai/gpt-3.5-turbo-0125": 0.75,
+	"openai/gpt-3.5-turbo-0613": 1.0,
+	"openai/gpt-3.5-turbo-1106": 1.0,
+	"openai/gpt-3.5-turbo-16k": 2.0,
+	"openai/gpt-3.5-turbo-instruct": 1.0,
+	"openai/gpt-4": 30.0,
+	"openai/gpt-4-0314": 30.0,
+	"openai/gpt-4-1106-preview": 15.0,
+	"openai/gpt-4-32k": 60.0,
+	"openai/gpt-4-32k-0314": 60.0,
+	"openai/gpt-4-turbo": 15.0,
+	"openai/gpt-4-turbo-preview": 15.0,
+	"openai/gpt-4o": 5.0,
+	"openai/gpt-4o-2024-05-13": 7.5,
+	"openai/gpt-4o-2024-08-06": 5.0,
+	"openai/gpt-4o-2024-11-20": 5.0,
+	"openai/gpt-4o-mini": 0.3,
+	"openai/gpt-4o-mini-2024-07-18": 0.3,
+	"openai/gpt-4o:extended": 9.0,
+	"openai/o1": 30.0,
+	"openai/o1-mini": 2.2,
+	"openai/o1-mini-2024-09-12": 2.2,
+	"openai/o1-preview": 30.0,
+	"openai/o1-preview-2024-09-12": 30.0,
+	"openai/o3-mini": 2.2,
+	"openai/o3-mini-high": 2.2,
+	"openchat/openchat-7b": 0.0275,
+	"openchat/openchat-7b:free": 0.0,
+	"openrouter/auto": -500000.0,
+	"perplexity/llama-3.1-sonar-huge-128k-online": 2.5,
+	"perplexity/llama-3.1-sonar-large-128k-chat": 0.5,
+	"perplexity/llama-3.1-sonar-large-128k-online": 0.5,
+	"perplexity/llama-3.1-sonar-small-128k-chat": 0.1,
+	"perplexity/llama-3.1-sonar-small-128k-online": 0.1,
+	"perplexity/sonar": 0.5,
+	"perplexity/sonar-reasoning": 2.5,
+	"pygmalionai/mythalion-13b": 0.6,
+	"qwen/qvq-72b-preview": 0.25,
+	"qwen/qwen-2-72b-instruct": 0.45,
+	"qwen/qwen-2-7b-instruct": 0.027,
+	"qwen/qwen-2-7b-instruct:free": 0.0,
+	"qwen/qwen-2-vl-72b-instruct": 0.2,
+	"qwen/qwen-2-vl-7b-instruct": 0.05,
+	"qwen/qwen-2.5-72b-instruct": 0.2,
+	"qwen/qwen-2.5-7b-instruct": 0.025,
+	"qwen/qwen-2.5-coder-32b-instruct": 0.08,
+	"qwen/qwen-max": 3.2,
+	"qwen/qwen-plus": 0.6,
+	"qwen/qwen-turbo": 0.1,
+	"qwen/qwen-vl-plus:free": 0.0,
+	"qwen/qwen2.5-vl-72b-instruct:free": 0.0,
+	"qwen/qwq-32b-preview": 0.09,
+	"raifle/sorcererlm-8x22b": 2.25,
+	"sao10k/fimbulvetr-11b-v2": 0.6,
+	"sao10k/l3-euryale-70b": 0.4,
+	"sao10k/l3-lunaris-8b": 0.03,
+	"sao10k/l3.1-70b-hanami-x1": 1.5,
+	"sao10k/l3.1-euryale-70b": 0.4,
+	"sao10k/l3.3-euryale-70b": 0.4,
+	"sophosympatheia/midnight-rose-70b": 0.4,
+	"sophosympatheia/rogue-rose-103b-v0.2:free": 0.0,
+	"teknium/openhermes-2.5-mistral-7b": 0.085,
+	"thedrummer/rocinante-12b": 0.25,
+	"thedrummer/unslopnemo-12b": 0.25,
+	"undi95/remm-slerp-l2-13b": 0.6,
+	"undi95/toppy-m-7b": 0.035,
+	"undi95/toppy-m-7b:free": 0.0,
+	"x-ai/grok-2-1212": 5.0,
+	"x-ai/grok-2-vision-1212": 5.0,
+	"x-ai/grok-beta": 7.5,
+	"x-ai/grok-vision-beta": 7.5,
+	"xwin-lm/xwin-lm-70b": 1.875,
 }

 var CompletionRatio = map[string]float64{
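
For orientation, ratios here are multiples of the base unit $0.002 per 1K tokens. A sketch of how an RMB-denominated entry above converts, assuming the constants this file usually defines (USD = 500, i.e. ratio 1 = $0.002 / 1K tokens, and RMB = USD / 7), which are outside this diff:

package main

import "fmt"

// Assumed constants (not shown in this diff): in relay/billing/ratio,
// ratio 1 corresponds to $0.002 / 1K tokens, so USD = 500 and RMB = USD / 7.
const (
	USD = 500
	RMB = USD / 7.0
)

func main() {
	// "qwen-max": 0.0024 * RMB prices the model at ¥0.0024 per 1K tokens.
	ratio := 0.0024 * RMB
	fmt.Printf("qwen-max ratio %.4f = $%.6f per 1K tokens\n", ratio, ratio*0.002)
}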

View File

@@ -0,0 +1,224 @@
+package controller
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
+	dbmodel "github.com/songquanpeng/one-api/model"
+	"github.com/songquanpeng/one-api/relay"
+	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/adaptor/openai"
+	"github.com/songquanpeng/one-api/relay/billing"
+	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
+	"github.com/songquanpeng/one-api/relay/meta"
+	"github.com/songquanpeng/one-api/relay/model"
+)
+
+// RelayAnthropicHelper handles native Anthropic API requests (anthropic -> anthropic passthrough)
+func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
+	ctx := c.Request.Context()
+	meta := meta.GetByContext(c)
+	logger.Infof(ctx, "Anthropic request received - URL: %s", c.Request.URL.String())
+
+	// get & validate the anthropic request
+	anthropicRequest, err := getAndValidateAnthropicRequest(c)
+	if err != nil {
+		logger.Errorf(ctx, "getAndValidateAnthropicRequest failed: %s", err.Error())
+		return openai.ErrorWrapper(err, "invalid_anthropic_request", http.StatusBadRequest)
+	}
+	logger.Debugf(ctx, "Parsed anthropic request - Model: %s, Stream: %v, Messages: %d",
+		anthropicRequest.Model, anthropicRequest.Stream, len(anthropicRequest.Messages))
+
+	meta.IsStream = anthropicRequest.Stream
+
+	// map the model name
+	meta.OriginModelName = anthropicRequest.Model
+	mappedModel, _ := getMappedModelName(anthropicRequest.Model, meta.ModelMapping)
+	anthropicRequest.Model = mappedModel
+	meta.ActualModelName = anthropicRequest.Model
+
+	// estimate token usage for the anthropic request
+	promptTokens := estimateAnthropicTokens(anthropicRequest)
+	meta.PromptTokens = promptTokens
+
+	// get model ratio & group ratio
+	modelRatio := billingratio.GetModelRatio(anthropicRequest.Model, meta.ChannelType)
+	groupRatio := billingratio.GetGroupRatio(meta.Group)
+	ratio := modelRatio * groupRatio
+
+	// pre-consume quota
+	preConsumedQuota, bizErr := preConsumeQuotaForAnthropic(ctx, anthropicRequest, promptTokens, ratio, meta)
+	if bizErr != nil {
+		logger.Warnf(ctx, "preConsumeQuota failed: %+v", *bizErr)
+		return bizErr
+	}
+
+	logger.Debugf(ctx, "Meta info - APIType: %d, ChannelType: %d, BaseURL: %s", meta.APIType, meta.ChannelType, meta.BaseURL)
+	adaptor := relay.GetAdaptor(meta.APIType)
+	if adaptor == nil {
+		logger.Errorf(ctx, "Failed to get adaptor for API type: %d", meta.APIType)
+		return openai.ErrorWrapper(fmt.Errorf("invalid api type: %d", meta.APIType), "invalid_api_type", http.StatusBadRequest)
+	}
+	logger.Debugf(ctx, "Using adaptor: %s", adaptor.GetChannelName())
+	adaptor.Init(meta)
+
+	// get the request body - for anthropic passthrough, we use the request body directly
+	requestBody, err := getAnthropicRequestBody(c, anthropicRequest)
+	if err != nil {
+		return openai.ErrorWrapper(err, "convert_anthropic_request_failed", http.StatusInternalServerError)
+	}
+
+	// do request
+	resp, err := adaptor.DoRequest(c, meta, requestBody)
+	if err != nil {
+		logger.Errorf(ctx, "DoRequest failed: %s", err.Error())
+		return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
+	}
+	logger.Debugf(ctx, "Received response - Status: %d", resp.StatusCode)
+
+	if isErrorHappened(meta, resp) {
+		logger.Errorf(ctx, "Error detected in response")
+		billing.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
+		return RelayErrorHandler(resp)
+	}
+
+	// do response - for anthropic native requests, handle the response directly
+	usage, respErr := handleAnthropicResponse(c, resp, meta)
+	if respErr != nil {
+		logger.Errorf(ctx, "respErr is not nil: %+v", respErr)
+		billing.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
+		return respErr
+	}
+	logger.Infof(ctx, "Anthropic request completed - Usage: %+v", usage)
+
+	// post-consume quota - for anthropic, we create a placeholder GeneralOpenAIRequest
+	placeholderRequest := &model.GeneralOpenAIRequest{
+		Model: anthropicRequest.Model,
+	}
+	go postConsumeQuota(ctx, usage, meta, placeholderRequest, ratio, preConsumedQuota, modelRatio, groupRatio, false)
+
+	return nil
+}
+
+func getAndValidateAnthropicRequest(c *gin.Context) (*anthropic.Request, error) {
+	anthropicRequest := &anthropic.Request{}
+	err := common.UnmarshalBodyReusable(c, anthropicRequest)
+	if err != nil {
+		return nil, err
+	}
+	// Basic validation
+	if anthropicRequest.Model == "" {
+		return nil, fmt.Errorf("model is required")
+	}
+	if len(anthropicRequest.Messages) == 0 {
+		return nil, fmt.Errorf("messages are required")
+	}
+	if anthropicRequest.MaxTokens == 0 {
+		anthropicRequest.MaxTokens = 4096 // default max tokens
+	}
+	return anthropicRequest, nil
+}
+
+func getAnthropicRequestBody(c *gin.Context, anthropicRequest *anthropic.Request) (io.Reader, error) {
+	// For anthropic native requests, marshal the request back to JSON
+	jsonData, err := json.Marshal(anthropicRequest)
+	if err != nil {
+		logger.Debugf(c.Request.Context(), "anthropic request json_marshal_failed: %s\n", err.Error())
+		return nil, err
+	}
+	logger.Debugf(c.Request.Context(), "anthropic request: \n%s", string(jsonData))
+	return bytes.NewBuffer(jsonData), nil
+}
+
+const (
+	// CHARS_PER_TOKEN represents the rough character-to-token ratio for Anthropic models.
+	// This is a conservative estimate: approximately 1 token per 4 characters.
+	CHARS_PER_TOKEN = 4
+)
+
+func estimateAnthropicTokens(request *anthropic.Request) int {
+	// Simple token estimation for Anthropic requests.
+	// This is a rough estimate; a real implementation may need more sophisticated logic.
+	totalTokens := 0
+
+	// Count tokens in the system prompt
+	if !request.System.IsEmpty() {
+		systemText := request.System.String()
+		totalTokens += len(systemText) / CHARS_PER_TOKEN // rough estimate: 1 token per 4 characters
+	}
+
+	// Count tokens in messages
+	for _, message := range request.Messages {
+		for _, content := range message.Content {
+			if content.Type == "text" {
+				totalTokens += len(content.Text) / CHARS_PER_TOKEN
+			}
+		}
+	}
+	return totalTokens
+}
+
+func handleAnthropicResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (*model.Usage, *model.ErrorWithStatusCode) {
+	// For anthropic native requests, use the direct handlers to keep the Anthropic format
+	if meta.IsStream {
+		// Handle the streaming response - note: DirectStreamHandler returns (error, usage)
+		err, usage := anthropic.DirectStreamHandler(c, resp)
+		return usage, err
+	} else {
+		// Handle the non-streaming response - note: DirectHandler returns (error, usage)
+		err, usage := anthropic.DirectHandler(c, resp, meta.PromptTokens, meta.ActualModelName)
+		return usage, err
+	}
+}
+
+func preConsumeQuotaForAnthropic(ctx context.Context, request *anthropic.Request, promptTokens int, ratio float64, meta *meta.Meta) (int64, *model.ErrorWithStatusCode) {
+	// Use the same quota logic as text requests, adapted for Anthropic
+	preConsumedTokens := config.PreConsumedQuota + int64(promptTokens)
+	if request.MaxTokens != 0 {
+		preConsumedTokens += int64(request.MaxTokens)
+	}
+	preConsumedQuota := int64(float64(preConsumedTokens) * ratio)
+	userQuota, err := dbmodel.CacheGetUserQuota(ctx, meta.UserId)
+	if err != nil {
+		return preConsumedQuota, openai.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
+	}
+	if userQuota-preConsumedQuota < 0 {
+		return preConsumedQuota, openai.ErrorWrapper(fmt.Errorf("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+	}
+	err = dbmodel.CacheDecreaseUserQuota(meta.UserId, preConsumedQuota)
+	if err != nil {
+		return preConsumedQuota, openai.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
+	}
+	if userQuota > 100*preConsumedQuota {
+		// in this case, we do not pre-consume quota because the user has enough quota
+		preConsumedQuota = 0
+		logger.Info(ctx, fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", meta.UserId, userQuota))
+	}
+	if preConsumedQuota > 0 {
+		err := dbmodel.PreConsumeTokenQuota(meta.TokenId, preConsumedQuota)
+		if err != nil {
+			return preConsumedQuota, openai.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
+		}
+	}
+	return preConsumedQuota, nil
+}
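
A worked example of the estimate above (sample strings are illustrative): with CHARS_PER_TOKEN = 4, a 28-character system prompt counts as 7 tokens and a 36-character user message as 9, for 16 prompt tokens in total.

package main

import "fmt"

// Standalone sketch of estimateAnthropicTokens: roughly one token per four
// characters, summed over the system prompt and all text content blocks.
const charsPerToken = 4

func main() {
	system := "You are a helpful assistant."          // 28 chars -> 7 tokens
	user := "Explain KV caching in two sentences." // 36 chars -> 9 tokens
	fmt.Println(len(system)/charsPerToken + len(user)/charsPerToken) // 16
}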

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/model"
+	"github.com/songquanpeng/one-api/relay/apitype"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 	"github.com/songquanpeng/one-api/relay/relaymode"
 )
@@ -62,5 +63,11 @@ func GetByContext(c *gin.Context) *Meta {
 		meta.BaseURL = channeltype.ChannelBaseURLs[meta.ChannelType]
 	}
 	meta.APIType = channeltype.ToAPIType(meta.ChannelType)
+
+	// Force the Anthropic API type for native Anthropic protocol requests
+	if meta.Mode == relaymode.AnthropicMessages {
+		meta.APIType = apitype.Anthropic
+	}
+
 	return &meta
 }

View File

@@ -13,4 +13,6 @@ const (
 	AudioTranslation
 	// Proxy is a special relay mode for proxying requests to custom upstream
 	Proxy
+	// AnthropicMessages is for the native Anthropic API messages endpoint
+	AnthropicMessages
 )

View File

@@ -26,6 +26,8 @@ func GetByPath(path string) int {
 		relayMode = AudioTranslation
 	} else if strings.HasPrefix(path, "/v1/oneapi/proxy") {
 		relayMode = Proxy
+	} else if strings.HasPrefix(path, "/anthropic/v1/messages") {
+		relayMode = AnthropicMessages
 	}
 	return relayMode
 }

View File

@@ -71,4 +71,16 @@ func SetRelayRouter(router *gin.Engine) {
 		relayV1Router.GET("/threads/:id/runs/:runsId/steps/:stepId", controller.RelayNotImplemented)
 		relayV1Router.GET("/threads/:id/runs/:runsId/steps", controller.RelayNotImplemented)
 	}
+
+	// Anthropic API compatibility - https://docs.anthropic.com/claude/reference/
+	anthropicRouter := router.Group("/anthropic")
+	anthropicRouter.Use(middleware.RelayPanicRecover(), middleware.TokenAuth(), middleware.Distribute())
+	{
+		// Models API
+		anthropicRouter.GET("/v1/models", controller.ListModels)
+		anthropicRouter.GET("/v1/models/:model", controller.RetrieveModel)
+		// Messages API - the main endpoint for chat completions
+		anthropicRouter.POST("/v1/messages", controller.Relay)
+	}
 }
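
A hypothetical client call against the new route (host, port, and token are placeholders; the body follows the Anthropic Messages schema the gateway relays unchanged):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := []byte(`{"model":"claude-3-5-sonnet-20240620","max_tokens":64,` +
		`"messages":[{"role":"user","content":"ping"}]}`)
	req, err := http.NewRequest("POST", "http://localhost:3000/anthropic/v1/messages", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// TokenAuth middleware expects a one-api token (placeholder value here)
	req.Header.Set("Authorization", "Bearer sk-your-one-api-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}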