Compare commits

...

4 Commits

Author SHA1 Message Date
Deadwalk
8c01a989f1 Merge 290931b506 into 8df4a2670b 2025-09-28 08:48:30 +00:00
Deadwalk
290931b506 fix: improve Dockerfile build configuration for ARM64 compatibility
- Add comprehensive npm mirror configuration for faster package downloads
- Use parallel npm install and build processes for better performance
- Configure Go proxy and Alpine package mirrors for network reliability
- Fix frontend dependency issues that caused a blank page to be displayed

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:48:24 +08:00
Deadwalk
48396d3f33 fix: 修复CodeReview发现的安全问题和代码质量问题 | fix security and code quality issues identified by CodeReview
- Fix JSON injection vulnerability: use json.Marshal() to safely escape string content
- Define the CHARS_PER_TOKEN constant to replace the hard-coded token-estimation factor of 4
- Handle UnmarshalJSON errors instead of failing silently, and log the errors
- Define constants for the hard-coded API endpoint paths to improve maintainability

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:41:48 +08:00
Deadwalk
e27612a620 refactor: 优化Anthropic协议日志输出 | optimize Anthropic protocol log output
- Remove unnecessary debug logs to reduce noise in production
- Downgrade verbose logs from Infof to Debugf
- Keep critical error logs and important flow information
- Improve log structure for readability and maintainability

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-28 16:22:18 +08:00
4 changed files with 70 additions and 40 deletions

View File

@@ -1,15 +1,28 @@
FROM --platform=$BUILDPLATFORM node:16 AS builder
# Configure npm to use the npmmirror (Taobao) registry
RUN npm config set registry https://registry.npmmirror.com && \
npm config set disturl https://npmmirror.com/dist && \
npm config set sass_binary_site https://npmmirror.com/mirrors/node-sass/ && \
npm config set electron_mirror https://npmmirror.com/mirrors/electron/ && \
npm config set puppeteer_download_host https://npmmirror.com/mirrors && \
npm config set chromedriver_cdnurl https://npmmirror.com/mirrors/chromedriver && \
npm config set operadriver_cdnurl https://npmmirror.com/mirrors/operadriver && \
npm config set phantomjs_cdnurl https://npmmirror.com/mirrors/phantomjs && \
npm config set selenium_cdnurl https://npmmirror.com/mirrors/selenium && \
npm config set node_inspector_cdnurl https://npmmirror.com/mirrors/node-inspector
WORKDIR /web
COPY ./VERSION .
COPY ./web .
RUN npm config set registry https://registry.npmmirror.com && \
npm install --prefix /web/default --legacy-peer-deps --retry 5 & \
npm install --prefix /web/berry --legacy-peer-deps --retry 5 & \
npm install --prefix /web/air --legacy-peer-deps --retry 5 & \
# Install npm dependencies in parallel to speed up the build
RUN npm install --prefix /web/default --prefer-offline --no-audit & \
npm install --prefix /web/berry --prefer-offline --no-audit & \
npm install --prefix /web/air --prefer-offline --no-audit & \
wait
# Build the frontend projects in parallel to speed up the build
RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/default & \
DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/berry & \
DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run build --prefix /web/air & \
@@ -17,22 +30,26 @@ RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat ./VERSION) npm run buil
FROM golang:alpine AS builder2
RUN apk add --no-cache \
# Configure Go to use a China mirror proxy
ENV GOPROXY=https://goproxy.cn,direct \
GOSUMDB=sum.golang.google.cn \
GO111MODULE=on \
CGO_ENABLED=1 \
GOOS=linux
# Use the Aliyun Alpine mirror to speed up apk package installation
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
apk update && \
apk add --no-cache \
gcc \
musl-dev \
sqlite-dev \
build-base
ENV GO111MODULE=on \
CGO_ENABLED=1 \
GOOS=linux \
GOPROXY=https://goproxy.cn,direct \
GOSUMDB=off
WORKDIR /build
ADD go.mod go.sum ./
RUN go mod download -x
RUN go mod download
COPY . .
COPY --from=builder /web/build ./web/build
@@ -41,7 +58,10 @@ RUN go build -trimpath -ldflags "-s -w -X 'github.com/songquanpeng/one-api/commo
FROM alpine:latest
RUN apk add --no-cache ca-certificates tzdata
# Use the Aliyun Alpine mirror to speed up apk package installation
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
apk update && \
apk add --no-cache ca-certificates tzdata
COPY --from=builder2 /build/one-api /

View File

@@ -14,6 +14,13 @@ import (
"github.com/songquanpeng/one-api/relay/relaymode"
)
const (
// NativeAnthropicEndpoint is the endpoint for native Anthropic API
NativeAnthropicEndpoint = "/v1/messages"
// ThirdPartyAnthropicEndpoint is the endpoint for third-party providers supporting Anthropic protocol
ThirdPartyAnthropicEndpoint = "/anthropic/v1/messages"
)
type Adaptor struct {
}
@@ -24,13 +31,13 @@ func (a *Adaptor) Init(meta *meta.Meta) {
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
// For native Anthropic API
if strings.Contains(meta.BaseURL, "api.anthropic.com") {
return fmt.Sprintf("%s/v1/messages", meta.BaseURL), nil
return fmt.Sprintf("%s%s", meta.BaseURL, NativeAnthropicEndpoint), nil
}
// For third-party providers supporting Anthropic protocol (like DeepSeek)
// They typically expose the endpoint at /anthropic/v1/messages
baseURL := strings.TrimSuffix(meta.BaseURL, "/")
return fmt.Sprintf("%s/anthropic/v1/messages", baseURL), nil
return fmt.Sprintf("%s%s", baseURL, ThirdPartyAnthropicEndpoint), nil
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
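
The two endpoint constants make the URL construction explicit. A minimal standalone sketch of how `GetRequestURL` resolves the endpoints; the base URLs below are illustrative examples, not taken from this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// Constants copied from the diff above.
const (
	NativeAnthropicEndpoint     = "/v1/messages"
	ThirdPartyAnthropicEndpoint = "/anthropic/v1/messages"
)

func main() {
	// Native Anthropic base URLs keep the official path.
	fmt.Println("https://api.anthropic.com" + NativeAnthropicEndpoint)
	// -> https://api.anthropic.com/v1/messages

	// Third-party providers that speak the Anthropic protocol get the
	// /anthropic prefix; a trailing slash on the base URL is trimmed first.
	baseURL := strings.TrimSuffix("https://api.deepseek.com/", "/")
	fmt.Println(baseURL + ThirdPartyAnthropicEndpoint)
	// -> https://api.deepseek.com/anthropic/v1/messages
}
```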

View File

@@ -93,9 +93,15 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
if message.Role == "system" && claudeRequest.System.IsEmpty() {
// Create a SystemPrompt from the string content
systemPrompt := SystemPrompt{}
systemData := []byte(`"` + message.StringContent() + `"`) // Wrap in JSON string quotes
_ = systemPrompt.UnmarshalJSON(systemData)
claudeRequest.System = systemPrompt
systemData, err := json.Marshal(message.StringContent()) // Safely escape string for JSON
if err != nil {
logger.SysError(fmt.Sprintf("Failed to marshal system prompt: %v", err))
} else {
if err := systemPrompt.UnmarshalJSON(systemData); err != nil {
logger.SysError(fmt.Sprintf("Failed to unmarshal system prompt: %v", err))
}
claudeRequest.System = systemPrompt
}
continue
}
claudeMessage := Message{
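
For context on the JSON-injection fix above, a standalone sketch (not part of the diff) contrasting the old naive quoting with `json.Marshal`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	content := "line one\nhe said \"hi\""

	// Old approach: wrapping in quotes breaks as soon as the content
	// contains quotes or newlines, and lets crafted input inject JSON.
	naive := []byte(`"` + content + `"`)
	var out string
	fmt.Println(json.Unmarshal(naive, &out)) // non-nil error: invalid JSON

	// New approach: json.Marshal escapes the string, so it always
	// round-trips as a valid JSON string.
	safe, _ := json.Marshal(content)
	fmt.Println(json.Unmarshal(safe, &out), out == content) // <nil> true
}
```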
@@ -386,9 +392,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
// DirectHandler handles native Anthropic API responses without conversion to OpenAI format
func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
ctx := c.Request.Context()
logger.Infof(ctx, "=== DirectHandler Start ===")
logger.Infof(ctx, "Response status: %d", resp.StatusCode)
logger.Infof(ctx, "Response headers: %+v", resp.Header)
logger.Debugf(ctx, "DirectHandler - Response status: %d", resp.StatusCode)
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
@@ -401,7 +405,7 @@ func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
logger.Infof(ctx, "Raw response body: %s", string(responseBody))
logger.Debugf(ctx, "Raw response body: %s", string(responseBody))
var claudeResponse Response
err = json.Unmarshal(responseBody, &claudeResponse)
@@ -421,7 +425,7 @@ func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
return nil, usage
}
logger.Infof(ctx, "Parsed response - ID: %s, Model: %s, Usage: %+v",
logger.Debugf(ctx, "Parsed response - ID: %s, Model: %s, Usage: %+v",
claudeResponse.Id, claudeResponse.Model, claudeResponse.Usage)
if claudeResponse.Error.Type != "" {
@@ -444,7 +448,7 @@ func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
TotalTokens: claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens,
}
logger.Infof(ctx, "Usage calculated: %+v", usage)
logger.Debugf(ctx, "Usage calculated: %+v", usage)
// Write the original Anthropic response directly
c.Writer.Header().Set("Content-Type", "application/json")
@@ -455,8 +459,7 @@ func DirectHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
return openai.ErrorWrapper(err, "write_response_failed", http.StatusInternalServerError), nil
}
logger.Infof(ctx, "Response written successfully")
logger.Infof(ctx, "=== DirectHandler End ===")
logger.Debugf(ctx, "Response written successfully")
return nil, &usage
}

View File

@@ -28,10 +28,7 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
ctx := c.Request.Context()
meta := meta.GetByContext(c)
logger.Infof(ctx, "=== Anthropic Request Start ===")
logger.Infof(ctx, "Request URL: %s", c.Request.URL.String())
logger.Infof(ctx, "Request Method: %s", c.Request.Method)
logger.Infof(ctx, "Request Headers: %+v", c.Request.Header)
logger.Infof(ctx, "Anthropic request received - URL: %s", c.Request.URL.String())
// get & validate anthropic request
anthropicRequest, err := getAndValidateAnthropicRequest(c)
@@ -39,7 +36,7 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
logger.Errorf(ctx, "getAndValidateAnthropicRequest failed: %s", err.Error())
return openai.ErrorWrapper(err, "invalid_anthropic_request", http.StatusBadRequest)
}
logger.Infof(ctx, "Parsed anthropic request - Model: %s, Stream: %v, Messages: %d",
logger.Debugf(ctx, "Parsed anthropic request - Model: %s, Stream: %v, Messages: %d",
anthropicRequest.Model, anthropicRequest.Stream, len(anthropicRequest.Messages))
meta.IsStream = anthropicRequest.Stream
@@ -65,14 +62,14 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
return bizErr
}
logger.Infof(ctx, "Meta info - APIType: %d, ChannelType: %d, BaseURL: %s", meta.APIType, meta.ChannelType, meta.BaseURL)
logger.Debugf(ctx, "Meta info - APIType: %d, ChannelType: %d, BaseURL: %s", meta.APIType, meta.ChannelType, meta.BaseURL)
adaptor := relay.GetAdaptor(meta.APIType)
if adaptor == nil {
logger.Errorf(ctx, "Failed to get adaptor for API type: %d", meta.APIType)
return openai.ErrorWrapper(fmt.Errorf("invalid api type: %d", meta.APIType), "invalid_api_type", http.StatusBadRequest)
}
logger.Infof(ctx, "Using adaptor: %s", adaptor.GetChannelName())
logger.Debugf(ctx, "Using adaptor: %s", adaptor.GetChannelName())
adaptor.Init(meta)
// get request body - for anthropic passthrough, we directly use the request body
@@ -82,13 +79,12 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
}
// do request
logger.Infof(ctx, "Sending request to upstream...")
resp, err := adaptor.DoRequest(c, meta, requestBody)
if err != nil {
logger.Errorf(ctx, "DoRequest failed: %s", err.Error())
return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}
logger.Infof(ctx, "Received response - Status: %d, Headers: %+v", resp.StatusCode, resp.Header)
logger.Debugf(ctx, "Received response - Status: %d", resp.StatusCode)
if isErrorHappened(meta, resp) {
logger.Errorf(ctx, "Error detected in response")
@@ -97,7 +93,6 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
}
// do response - for anthropic native requests, we need to handle the response directly
logger.Infof(ctx, "Processing anthropic response...")
usage, respErr := handleAnthropicResponse(c, resp, meta)
if respErr != nil {
logger.Errorf(ctx, "respErr is not nil: %+v", respErr)
@@ -105,8 +100,7 @@ func RelayAnthropicHelper(c *gin.Context) *model.ErrorWithStatusCode {
return respErr
}
logger.Infof(ctx, "Response processed successfully - Usage: %+v", usage)
logger.Infof(ctx, "=== Anthropic Request End ===")
logger.Infof(ctx, "Anthropic request completed - Usage: %+v", usage)
// post-consume quota - for anthropic, we create a placeholder GeneralOpenAIRequest
placeholderRequest := &model.GeneralOpenAIRequest{
@@ -148,6 +142,12 @@ func getAnthropicRequestBody(c *gin.Context, anthropicRequest *anthropic.Request
return bytes.NewBuffer(jsonData), nil
}
const (
// CHARS_PER_TOKEN represents the rough character-to-token ratio for Anthropic models
// This is a conservative estimate: approximately 1 token per 4 characters
CHARS_PER_TOKEN = 4
)
func estimateAnthropicTokens(request *anthropic.Request) int {
// Simple token estimation for Anthropic requests
// This is a rough estimation, actual implementation might need more sophisticated logic
@@ -156,14 +156,14 @@ func estimateAnthropicTokens(request *anthropic.Request) int {
// Count tokens in system prompt
if !request.System.IsEmpty() {
systemText := request.System.String()
totalTokens += len(systemText) / 4 // rough estimate: 1 token per 4 characters
totalTokens += len(systemText) / CHARS_PER_TOKEN // rough estimate: 1 token per 4 characters
}
// Count tokens in messages
for _, message := range request.Messages {
for _, content := range message.Content {
if content.Type == "text" {
totalTokens += len(content.Text) / 4
totalTokens += len(content.Text) / CHARS_PER_TOKEN
}
}
}
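
A rough standalone sketch of the character-based estimate, with the constant copied from the diff; the sample strings and counts are illustrative only:

```go
package main

import "fmt"

// CHARS_PER_TOKEN mirrors the constant introduced above.
const CHARS_PER_TOKEN = 4

func main() {
	systemPrompt := "You are a helpful assistant."         // 28 characters -> 7
	userText := "Summarize the latest deployment changes." // 40 characters -> 10

	estimated := len(systemPrompt)/CHARS_PER_TOKEN + len(userText)/CHARS_PER_TOKEN
	fmt.Println(estimated) // 17 estimated tokens
}
```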