feat: support OpenRouter reasoning

Laisky.Cai 2025-02-19 01:11:46 +00:00
parent 7ac553541b
commit 480f248a3d
7 changed files with 119 additions and 20 deletions

View File

@@ -1,6 +1,9 @@
 package conv
 
 func AsString(v any) string {
-	str, _ := v.(string)
-	return str
+	if str, ok := v.(string); ok {
+		return str
+	}
+
+	return ""
 }
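The rewrite does not change observable behavior (a failed type assertion already yielded the zero value ""), it only makes the fallback explicit. A minimal usage sketch, assuming the conv import path shown elsewhere in this commit:

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/conv"
)

func main() {
	fmt.Println(conv.AsString("hello")) // "hello"
	fmt.Println(conv.AsString(42))      // "" (non-strings fall through)
	fmt.Println(conv.AsString(nil))     // ""
}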

View File

@@ -9,6 +9,8 @@ import (
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/alibailian"
 	"github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
@@ -16,6 +18,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
+	"github.com/songquanpeng/one-api/relay/adaptor/openrouter"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
@@ -85,7 +88,28 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
-	if request.Stream {
+	meta := meta.GetByContext(c)
+	switch meta.ChannelType {
+	case channeltype.OpenRouter:
+		includeReasoning := true
+		request.IncludeReasoning = &includeReasoning
+		if request.Provider == nil || request.Provider.Sort == "" {
+			if request.Provider == nil {
+				request.Provider = &openrouter.RequestProvider{}
+			}
+			request.Provider.Sort = "throughput"
+		}
+	default:
+	}
+
+	if request.Stream && !config.EnforceIncludeUsage {
+		logger.Warn(c.Request.Context(),
+			"please set ENFORCE_INCLUDE_USAGE=true to ensure accurate billing in stream mode")
+	}
+
+	if config.EnforceIncludeUsage && request.Stream {
 		// always return usage in stream mode
 		if request.StreamOptions == nil {
 			request.StreamOptions = &model.StreamOptions{}
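For OpenRouter channels, ConvertRequest now forces include_reasoning on and defaults provider.sort to "throughput" whenever the caller did not pick a sort, leaving explicit choices untouched. A standalone sketch of that defaulting rule, with simplified stand-ins for model.GeneralOpenAIRequest and openrouter.RequestProvider (the real types carry many more fields; the model name is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// provider and request are simplified stand-ins, not the one-api types.
type provider struct {
	Sort string `json:"sort,omitempty"`
}

type request struct {
	Model            string    `json:"model"`
	IncludeReasoning *bool     `json:"include_reasoning,omitempty"`
	Provider         *provider `json:"provider,omitempty"`
}

func main() {
	req := request{Model: "anthropic/claude-3.5-sonnet"}

	// Same defaulting rule as the OpenRouter branch above.
	includeReasoning := true
	req.IncludeReasoning = &includeReasoning
	if req.Provider == nil || req.Provider.Sort == "" {
		if req.Provider == nil {
			req.Provider = &provider{}
		}
		req.Provider.Sort = "throughput"
	}

	out, _ := json.Marshal(req)
	fmt.Println(string(out))
	// {"model":"anthropic/claude-3.5-sonnet","include_reasoning":true,"provider":{"sort":"throughput"}}
}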

View File

@@ -8,12 +8,11 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/songquanpeng/one-api/common/render"
-
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/conv"
 	"github.com/songquanpeng/one-api/common/logger"
+	"github.com/songquanpeng/one-api/common/render"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/relaymode"
 )
@@ -26,6 +25,7 @@ const (
 func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
 	responseText := ""
+	reasoningText := ""
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(bufio.ScanLines)
 	var usage *model.Usage
@@ -61,6 +61,10 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
 		}
 		render.StringData(c, data)
 		for _, choice := range streamResponse.Choices {
+			if choice.Delta.Reasoning != nil {
+				reasoningText += *choice.Delta.Reasoning
+			}
 			responseText += conv.AsString(choice.Delta.Content)
 		}
 		if streamResponse.Usage != nil {
@@ -93,7 +97,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
 		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "", nil
 	}
 
-	return nil, responseText, usage
+	return nil, reasoningText + responseText, usage
 }
@@ -147,5 +151,6 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
 			TotalTokens:      promptTokens + completionTokens,
 		}
 	}
+
 	return nil, &textResponse.Usage
 }
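StreamHandler now accumulates OpenRouter reasoning deltas alongside the content deltas and returns them concatenated (reasoning first), so downstream token counting covers both. A sketch of that per-chunk accounting against two synthetic stream payloads, using simplified local types rather than the relay models:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the relay model types.
type delta struct {
	Content   any     `json:"content,omitempty"`
	Reasoning *string `json:"reasoning,omitempty"`
}

type chunk struct {
	Choices []struct {
		Delta delta `json:"delta"`
	} `json:"choices"`
}

// asString mirrors conv.AsString.
func asString(v any) string {
	if s, ok := v.(string); ok {
		return s
	}
	return ""
}

func main() {
	// Two made-up stream chunks: a reasoning delta, then a content delta.
	lines := []string{
		`{"choices":[{"delta":{"reasoning":"Comparing options... "}}]}`,
		`{"choices":[{"delta":{"content":"The answer is 42."}}]}`,
	}

	reasoningText, responseText := "", ""
	for _, data := range lines {
		var sc chunk
		if err := json.Unmarshal([]byte(data), &sc); err != nil {
			continue
		}
		for _, choice := range sc.Choices {
			if choice.Delta.Reasoning != nil {
				reasoningText += *choice.Delta.Reasoning
			}
			responseText += asString(choice.Delta.Content)
		}
	}

	// What StreamHandler hands back for usage accounting.
	fmt.Println(reasoningText + responseText)
}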

View File

@@ -0,0 +1,22 @@
+package openrouter
+
+// RequestProvider customizes how requests are routed via the provider object
+// in the request body for Chat Completions and Completions.
+//
+// https://openrouter.ai/docs/features/provider-routing
+type RequestProvider struct {
+	// Order is the list of provider names to try in order (e.g. ["Anthropic", "OpenAI"]). Default: empty
+	Order []string `json:"order,omitempty"`
+	// AllowFallbacks controls whether backup providers may be used when the primary is unavailable. Default: true
+	AllowFallbacks bool `json:"allow_fallbacks,omitempty"`
+	// RequireParameters restricts routing to providers that support all parameters in the request. Default: false
+	RequireParameters bool `json:"require_parameters,omitempty"`
+	// DataCollection controls whether providers that may store data are used ("allow" or "deny"). Default: "allow"
+	DataCollection string `json:"data_collection,omitempty" binding:"omitempty,oneof=allow deny"`
+	// Ignore is the list of provider names to skip for this request. Default: empty
+	Ignore []string `json:"ignore,omitempty"`
+	// Quantizations is the list of quantization levels to filter by (e.g. ["int4", "int8"]). Default: empty
+	Quantizations []string `json:"quantizations,omitempty"`
+	// Sort orders providers by "price", "throughput", or "latency". Default: empty
+	Sort string `json:"sort,omitempty" binding:"omitempty,oneof=price throughput latency"`
+}
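How the struct serializes in practice, as a short sketch (field values are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/openrouter"
)

func main() {
	p := openrouter.RequestProvider{
		Order:          []string{"Anthropic", "OpenAI"}, // try Anthropic first
		DataCollection: "deny",
		Sort:           "throughput",
	}
	out, _ := json.Marshal(p)
	fmt.Println(string(out))
	// {"order":["Anthropic","OpenAI"],"data_collection":"deny","sort":"throughput"}
}

One wrinkle of this layout: AllowFallbacks is a plain bool with omitempty, so an explicit false is dropped during marshaling and OpenRouter's default (true) applies; a *bool would be needed to express false on the wire.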

View File

@@ -1,5 +1,7 @@
 package model
 
+import "github.com/songquanpeng/one-api/relay/adaptor/openrouter"
+
 type ResponseFormat struct {
 	Type       string      `json:"type,omitempty"`
 	JsonSchema *JSONSchema `json:"json_schema,omitempty"`
@@ -66,6 +68,11 @@ type GeneralOpenAIRequest struct {
 	// Others
 	Instruction string `json:"instruction,omitempty"`
 	NumCtx      int    `json:"num_ctx,omitempty"`
+	// -------------------------------------
+	// Openrouter
+	// -------------------------------------
+	Provider         *openrouter.RequestProvider `json:"provider,omitempty"`
+	IncludeReasoning *bool                       `json:"include_reasoning,omitempty"`
 }
 
 func (r GeneralOpenAIRequest) ParseInput() []string {

View File

@@ -2,11 +2,34 @@ package model
 
 type Message struct {
 	Role string `json:"role,omitempty"`
+	// Content is a string or a list of objects
 	Content any `json:"content,omitempty"`
-	ReasoningContent any `json:"reasoning_content,omitempty"`
 	Name       *string       `json:"name,omitempty"`
 	ToolCalls  []Tool        `json:"tool_calls,omitempty"`
 	ToolCallId string        `json:"tool_call_id,omitempty"`
+	Audio      *messageAudio `json:"audio,omitempty"`
+	// -------------------------------------
+	// Deepseek-specific fields
+	// https://api-docs.deepseek.com/api/create-chat-completion
+	// -------------------------------------
+	// Prefix forces the model to begin its answer with the supplied prefix in the assistant message.
+	// To enable this feature, set base_url to "https://api.deepseek.com/beta".
+	Prefix *bool `json:"prefix,omitempty"`
+	// ReasoningContent is used for the deepseek-reasoner model in the Chat
+	// Prefix Completion feature as the input for the CoT in the last assistant message.
+	// When using this feature, the prefix parameter must be set to true.
+	ReasoningContent *string `json:"reasoning_content,omitempty"`
+	// -------------------------------------
+	// Openrouter
+	// -------------------------------------
+	Reasoning *string `json:"reasoning,omitempty"`
+	Refusal   *bool   `json:"refusal,omitempty"`
 }
 
+type messageAudio struct {
+	Id         string `json:"id"`
+	Data       string `json:"data,omitempty"`
+	ExpiredAt  int    `json:"expired_at,omitempty"`
+	Transcript string `json:"transcript,omitempty"`
+}
+
 func (m Message) IsStringContent() bool {
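The Prefix and ReasoningContent comments describe Deepseek's beta prefix-completion flow: the final assistant message carries the forced prefix text, with prefix set to true. A hedged sketch of such a message list built from these fields (prompt and prefix text are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/songquanpeng/one-api/relay/model"
)

func main() {
	prefix := true
	messages := []model.Message{
		{Role: "user", Content: "Write quicksort in Python."},
		// Last assistant message: the completion must continue from this text.
		// Requires base_url https://api.deepseek.com/beta and Prefix set to true.
		{Role: "assistant", Content: "def quicksort(arr):", Prefix: &prefix},
	}
	out, _ := json.Marshal(messages)
	fmt.Println(string(out))
}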

View File

@@ -4,14 +4,12 @@ type Usage struct {
 	PromptTokens     int `json:"prompt_tokens"`
 	CompletionTokens int `json:"completion_tokens"`
 	TotalTokens      int `json:"total_tokens"`
-	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"`
-}
-
-type CompletionTokensDetails struct {
-	ReasoningTokens          int `json:"reasoning_tokens"`
-	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
-	RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
+	// PromptTokensDetails may be empty for some models
+	PromptTokensDetails *usagePromptTokensDetails `gorm:"-" json:"prompt_tokens_details,omitempty"`
+	// CompletionTokensDetails may be empty for some models
+	CompletionTokensDetails *usageCompletionTokensDetails `gorm:"-" json:"completion_tokens_details,omitempty"`
+	ServiceTier       string `gorm:"-" json:"service_tier,omitempty"`
+	SystemFingerprint string `gorm:"-" json:"system_fingerprint,omitempty"`
 }
 
 type Error struct {
@@ -25,3 +23,20 @@ type ErrorWithStatusCode struct {
 	Error
 	StatusCode int `json:"status_code"`
 }
+
+type usagePromptTokensDetails struct {
+	CachedTokens int `json:"cached_tokens"`
+	AudioTokens  int `json:"audio_tokens"`
+	// TextTokens could be zero for pure text chats
+	TextTokens  int `json:"text_tokens"`
+	ImageTokens int `json:"image_tokens"`
+}
+
+type usageCompletionTokensDetails struct {
+	ReasoningTokens          int `json:"reasoning_tokens"`
+	AudioTokens              int `json:"audio_tokens"`
+	AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
+	RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
+	// TextTokens could be zero for pure text chats
+	TextTokens int `json:"text_tokens"`
+}
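The two detail structs stay unexported and are excluded from persistence via the gorm:"-" tag, so they exist purely for JSON decoding of upstream responses. A small sketch, assuming the usual OpenAI-style usage payload shape (numbers are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/songquanpeng/one-api/relay/model"
)

func main() {
	payload := `{
		"prompt_tokens": 19,
		"completion_tokens": 150,
		"total_tokens": 169,
		"completion_tokens_details": {"reasoning_tokens": 128, "text_tokens": 22}
	}`

	var u model.Usage
	if err := json.Unmarshal([]byte(payload), &u); err != nil {
		panic(err)
	}

	// Exported fields of the unexported detail struct are still readable here.
	fmt.Println(u.TotalTokens, u.CompletionTokensDetails.ReasoningTokens) // 169 128
}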