feat: enhance Gemini API to support image response modalities and update model ratios

Laisky.Cai 2025-03-17 01:22:33 +00:00
parent 8f50f485a9
commit 080b6036fb
6 changed files with 89 additions and 33 deletions

View File

@@ -44,7 +44,7 @@ func GetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority bool
 		err = channelQuery.Order("RAND()").First(&ability).Error
 	}
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "get random satisfied channel")
 	}
 	channel := Channel{}
 	channel.Id = ability.ChannelId
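
The only functional change in this file is the added error context. A minimal sketch of the observable difference, assuming a pkg/errors-style Wrap (the exact errors package imported is not visible in this diff):

package main

import (
	"fmt"

	"github.com/pkg/errors" // assumption: any Wrap/Cause-compatible package behaves the same
)

func main() {
	base := fmt.Errorf("record not found")
	wrapped := errors.Wrap(base, "get random satisfied channel")

	fmt.Println(wrapped)                       // get random satisfied channel: record not found
	fmt.Println(errors.Cause(wrapped) == base) // true: the original error survives for comparisons
}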

View File

@@ -19,6 +19,9 @@ var ModelsSupportSystemInstruction = []string{
 	// "gemini-1.5-pro-experimental",
 	"gemini-2.0-flash", "gemini-2.0-flash-exp",
 	"gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-2.0-flash-lite",
+	"gemini-2.0-flash-exp-image-generation",
+	"gemini-2.0-pro-exp-02-05",
 }

 // IsModelSupportSystemInstruction check if the model support system instruction.
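
For context, the function named in the comment is presumably a plain membership test over this list; a minimal sketch under that assumption (the real implementation may also normalize version suffixes):

package main

import "fmt"

var ModelsSupportSystemInstruction = []string{
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp-01-21",
	"gemini-2.0-flash-lite",
	"gemini-2.0-flash-exp-image-generation",
	"gemini-2.0-pro-exp-02-05",
}

// IsModelSupportSystemInstruction reports whether the model accepts a
// system_instruction field (sketch: exact-match lookup only).
func IsModelSupportSystemInstruction(model string) bool {
	for _, m := range ModelsSupportSystemInstruction {
		if m == model {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(IsModelSupportSystemInstruction("gemini-2.0-flash-lite")) // true
}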

View File

@@ -62,6 +62,9 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 			Temperature:     textRequest.Temperature,
 			TopP:            textRequest.TopP,
 			MaxOutputTokens: textRequest.MaxTokens,
+			ResponseModalities: []string{
+				"TEXT", "IMAGE",
+			},
 		},
 	}
 	if textRequest.ResponseFormat != nil {
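
Note that ConvertRequest now requests both modalities unconditionally; Gemini models without image output support may reject the field. A sketch of the generationConfig payload this yields, using a trimmed copy of the ChatGenerationConfig tags from this commit:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of this commit's ChatGenerationConfig; tags unchanged.
type ChatGenerationConfig struct {
	MaxOutputTokens    int      `json:"maxOutputTokens,omitempty"`
	ResponseModalities []string `json:"responseModalities,omitempty"`
}

func main() {
	cfg := ChatGenerationConfig{
		MaxOutputTokens:    1024,
		ResponseModalities: []string{"TEXT", "IMAGE"},
	}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out))
	// Output: {"maxOutputTokens":1024,"responseModalities":["TEXT","IMAGE"]}
}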
@@ -256,19 +259,52 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
 		if candidate.Content.Parts[0].FunctionCall != nil {
 			choice.Message.ToolCalls = getToolCalls(&candidate)
 		} else {
+			// Handle text and image content
 			var builder strings.Builder
+			var contentItems []model.MessageContent
+
 			for i, part := range candidate.Content.Parts {
-				if i > 0 {
-					builder.WriteString("\n")
+				if part.Text != "" {
+					// For text parts
+					if i > 0 {
+						builder.WriteString("\n")
+					}
+					builder.WriteString(part.Text)
+
+					// Add to content items
+					contentItems = append(contentItems, model.MessageContent{
+						Type: model.ContentTypeText,
+						Text: part.Text,
+					})
 				}
-				builder.WriteString(part.Text)
+
+				if part.InlineData != nil && part.InlineData.MimeType != "" && part.InlineData.Data != "" {
+					// For inline image data
+					imageURL := &model.ImageURL{
+						// The data is already base64 encoded
+						Url: fmt.Sprintf("data:%s;base64,%s", part.InlineData.MimeType, part.InlineData.Data),
+					}
+					contentItems = append(contentItems, model.MessageContent{
+						Type:     model.ContentTypeImageURL,
+						ImageURL: imageURL,
+					})
+				}
 			}
-			choice.Message.Content = builder.String()
+
+			// If we have multiple content types, use structured content format
+			if len(contentItems) > 1 || (len(contentItems) == 1 && contentItems[0].Type != model.ContentTypeText) {
+				choice.Message.Content = contentItems
+			} else {
+				// Otherwise use the simple string content format
+				choice.Message.Content = builder.String()
+			}
 		}
 	} else {
 		choice.Message.Content = ""
 		choice.FinishReason = candidate.FinishReason
 	}
 	fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
 }
 return &fullTextResponse
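
The net effect: when Gemini returns an inlineData part, the converted OpenAI-style message switches from a plain string to structured content parts carrying a base64 data URI. A sketch of the resulting shape, with minimal stand-ins for model.MessageContent and model.ImageURL (assumed here to follow the OpenAI content-part convention):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for model.ImageURL / model.MessageContent; the real types
// live elsewhere in the repo and are assumed to carry these JSON tags.
type ImageURL struct {
	Url string `json:"url"`
}

type MessageContent struct {
	Type     string    `json:"type"`
	Text     string    `json:"text,omitempty"`
	ImageURL *ImageURL `json:"image_url,omitempty"`
}

func main() {
	content := []MessageContent{
		{Type: "text", Text: "Here is the generated image:"},
		{Type: "image_url", ImageURL: &ImageURL{
			Url: "data:image/png;base64,iVBORw0KGgo...", // truncated base64 payload
		}},
	}
	out, _ := json.MarshalIndent(content, "", "  ")
	fmt.Println(string(out))
}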
@@ -359,6 +395,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string
 	if err != nil {
 		return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
+
 	err = resp.Body.Close()
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil

View File

@ -6,6 +6,19 @@ type ChatRequest struct {
GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"` GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
Tools []ChatTools `json:"tools,omitempty"` Tools []ChatTools `json:"tools,omitempty"`
SystemInstruction *ChatContent `json:"system_instruction,omitempty"` SystemInstruction *ChatContent `json:"system_instruction,omitempty"`
ModelVersion string `json:"model_version,omitempty"`
UsageMetadata *UsageMetadata `json:"usage_metadata,omitempty"`
}
type UsageMetadata struct {
PromptTokenCount int `json:"promptTokenCount,omitempty"`
TotalTokenCount int `json:"totalTokenCount,omitempty"`
PromptTokensDetails []PromptTokensDetails `json:"promptTokensDetails,omitempty"`
}
type PromptTokensDetails struct {
Modality string `json:"modality,omitempty"`
TokenCount int `json:"tokenCount,omitempty"`
} }
type EmbeddingRequest struct { type EmbeddingRequest struct {
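
The camelCase tags on the new structs mirror the usageMetadata block Gemini returns alongside candidates. A minimal decode sketch (field values illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// Copies of the structs added above.
type PromptTokensDetails struct {
	Modality   string `json:"modality,omitempty"`
	TokenCount int    `json:"tokenCount,omitempty"`
}

type UsageMetadata struct {
	PromptTokenCount    int                   `json:"promptTokenCount,omitempty"`
	TotalTokenCount     int                   `json:"totalTokenCount,omitempty"`
	PromptTokensDetails []PromptTokensDetails `json:"promptTokensDetails,omitempty"`
}

func main() {
	raw := `{"promptTokenCount":10,"totalTokenCount":1303,"promptTokensDetails":[{"modality":"TEXT","tokenCount":10}]}`
	var u UsageMetadata
	if err := json.Unmarshal([]byte(raw), &u); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", u)
	// Output: {PromptTokenCount:10 TotalTokenCount:1303 PromptTokensDetails:[{Modality:TEXT TokenCount:10}]}
}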
@@ -66,14 +79,15 @@ type ChatTools struct {
 }

 type ChatGenerationConfig struct {
-	ResponseMimeType string   `json:"responseMimeType,omitempty"`
-	ResponseSchema   any      `json:"responseSchema,omitempty"`
-	Temperature      *float64 `json:"temperature,omitempty"`
-	TopP             *float64 `json:"topP,omitempty"`
-	TopK             float64  `json:"topK,omitempty"`
-	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
-	CandidateCount   int      `json:"candidateCount,omitempty"`
-	StopSequences    []string `json:"stopSequences,omitempty"`
+	ResponseMimeType   string   `json:"responseMimeType,omitempty"`
+	ResponseSchema     any      `json:"responseSchema,omitempty"`
+	Temperature        *float64 `json:"temperature,omitempty"`
+	TopP               *float64 `json:"topP,omitempty"`
+	TopK               float64  `json:"topK,omitempty"`
+	MaxOutputTokens    int      `json:"maxOutputTokens,omitempty"`
+	CandidateCount     int      `json:"candidateCount,omitempty"`
+	StopSequences      []string `json:"stopSequences,omitempty"`
+	ResponseModalities []string `json:"responseModalities,omitempty"`
 }

 type FunctionCallingConfig struct {

View File

@@ -12,4 +12,5 @@ var ModelList = []string{
 	"gemini-2.0-flash-lite",
 	"gemini-2.0-flash-thinking-exp-01-21",
 	"gemini-2.0-pro-exp-02-05",
+	"gemini-2.0-flash-exp-image-generation",
 }

View File

@@ -147,26 +147,27 @@ var ModelRatio = map[string]float64{
 	// "gemma-2-2b-it": 0,
 	// "gemma-2-9b-it": 0,
 	// "gemma-2-27b-it": 0,
 	"gemini-pro":                            0.25 * MilliTokensUsd, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"gemini-1.0-pro":                        0.125 * MilliTokensUsd,
 	"gemini-1.0-pro-vision":                 0.125 * MilliTokensUsd,
 	"gemini-1.5-pro":                        1.25 * MilliTokensUsd,
 	"gemini-1.5-pro-001":                    1.25 * MilliTokensUsd,
 	"gemini-1.5-pro-002":                    1.25 * MilliTokensUsd,
 	"gemini-1.5-pro-experimental":           1.25 * MilliTokensUsd,
 	"gemini-1.5-flash":                      0.075 * MilliTokensUsd,
 	"gemini-1.5-flash-001":                  0.075 * MilliTokensUsd,
 	"gemini-1.5-flash-002":                  0.075 * MilliTokensUsd,
 	"gemini-1.5-flash-8b":                   0.0375 * MilliTokensUsd,
 	"gemini-2.0-flash":                      0.15 * MilliTokensUsd,
 	"gemini-2.0-flash-exp":                  0.075 * MilliTokensUsd,
 	"gemini-2.0-flash-001":                  0.15 * MilliTokensUsd,
 	"gemini-2.0-flash-lite":                 0.075 * MilliTokensUsd,
 	"gemini-2.0-flash-lite-001":             0.075 * MilliTokensUsd,
 	"gemini-2.0-flash-lite-preview-02-05":   0.075 * MilliTokensUsd,
 	"gemini-2.0-flash-thinking-exp-01-21":   0.075 * MilliTokensUsd,
 	"gemini-2.0-pro-exp-02-05":              1.25 * MilliTokensUsd,
+	"gemini-2.0-flash-exp-image-generation": 0.075 * MilliTokensUsd,
 	"aqa":                                   1,
 	// https://open.bigmodel.cn/pricing
 	"glm-zero-preview": 0.01 * KiloRmb,
 	"glm-4-plus":       0.05 * KiloRmb,