feat: support gemini-2.0-flash

Laisky.Cai
2025-02-06 02:34:41 +00:00
parent aa30c37e3c
commit 3e4708b77e
6 changed files with 30 additions and 11 deletions

View File

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
@@ -24,13 +25,9 @@ func (a *Adaptor) Init(meta *meta.Meta) {
 }
 
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	var defaultVersion string
-	switch meta.ActualModelName {
-	case "gemini-2.0-flash-exp",
-		"gemini-2.0-flash-thinking-exp-01-21":
-		defaultVersion = "v1beta"
-	default:
-		defaultVersion = config.GeminiVersion
-	}
+	defaultVersion := config.GeminiVersion
+	if strings.Contains(meta.ActualModelName, "-2.0") {
+		defaultVersion = "v1beta"
+	}
 
 	version := helper.AssignOrDefault(meta.Config.APIVersion, defaultVersion)

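For clarity, here is the new routing rule in isolation as a minimal, runnable Go sketch (not code from this commit): the geminiVersion constant stands in for config.GeminiVersion, whose default value of "v1" is an assumption here, and helper.AssignOrDefault is omitted since only the default selection changed.

package main

import (
	"fmt"
	"strings"
)

// geminiVersion stands in for config.GeminiVersion; "v1" is an assumed default.
const geminiVersion = "v1"

// defaultVersionFor mirrors the new rule: any model name containing "-2.0"
// is routed to the v1beta API, everything else keeps the configured default.
func defaultVersionFor(model string) string {
	v := geminiVersion
	if strings.Contains(model, "-2.0") {
		v = "v1beta"
	}
	return v
}

func main() {
	for _, m := range []string{"gemini-1.5-pro", "gemini-2.0-flash", "gemini-2.0-pro-exp-02-05"} {
		fmt.Printf("%-25s -> %s\n", m, defaultVersionFor(m))
	}
}

This replaces the per-model switch with a substring check, so future 2.0 releases pick up v1beta without further code changes.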
View File

@@ -4,8 +4,12 @@ package gemini
 var ModelList = []string{
 	"gemini-pro", "gemini-1.0-pro",
-	"gemini-1.5-flash", "gemini-1.5-pro",
 	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
+	"gemini-1.5-flash", "gemini-1.5-flash-8b",
+	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
 	"text-embedding-004", "aqa",
-	"gemini-2.0-flash-exp",
+	"gemini-2.0-pro-exp-02-05",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-lite-preview-02-05",
 	"gemini-2.0-flash-thinking-exp-01-21",
 }

View File

@@ -160,6 +160,15 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 		geminiRequest.Contents = append(geminiRequest.Contents, content)
 	}
 
+	// As of 2025-02-06, the newly released gemini 2.0 models do not support system_instruction,
+	// which can reasonably be considered a bug. Google may fix this issue in the future.
+	if geminiRequest.SystemInstruction != nil &&
+		strings.Contains(textRequest.Model, "-2.0") &&
+		textRequest.Model != "gemini-2.0-flash-exp" &&
+		textRequest.Model != "gemini-2.0-flash-thinking-exp-01-21" {
+		geminiRequest.SystemInstruction = nil
+	}
+
 	return &geminiRequest
 }

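The guard above reduces to a predicate on the model name. A self-contained sketch of its effect, where keepsSystemInstruction is a hypothetical helper written for this illustration, not part of the adaptor: system_instruction survives for non-2.0 models and for the two experimental 2.0 models, and is stripped for the newly released 2.0 models as a workaround.

package main

import (
	"fmt"
	"strings"
)

// keepsSystemInstruction reports whether the request's system_instruction
// would be forwarded to Google for the given model name.
func keepsSystemInstruction(model string) bool {
	if !strings.Contains(model, "-2.0") {
		return true // 1.x and non-Gemini models are unaffected
	}
	return model == "gemini-2.0-flash-exp" ||
		model == "gemini-2.0-flash-thinking-exp-01-21"
}

func main() {
	fmt.Println(keepsSystemInstruction("gemini-1.5-pro"))       // true
	fmt.Println(keepsSystemInstruction("gemini-2.0-flash"))     // false: stripped as a workaround
	fmt.Println(keepsSystemInstruction("gemini-2.0-flash-exp")) // true
}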
View File

@@ -122,14 +122,23 @@ var ModelRatio = map[string]float64{
 	"bge-large-en": 0.002 * RMB,
 	"tao-8k":       0.002 * RMB,
 	// https://ai.google.dev/pricing
+	// https://cloud.google.com/vertex-ai/generative-ai/pricing
+	// "gemma-2-2b-it":  0,
+	// "gemma-2-9b-it":  0,
+	// "gemma-2-27b-it": 0,
 	"gemini-pro":                  0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"gemini-1.0-pro":              0.125 * MILLI_USD,
 	"gemini-1.5-pro":              1.25 * MILLI_USD,
 	"gemini-1.5-pro-001":          1.25 * MILLI_USD,
 	"gemini-1.5-pro-experimental": 1.25 * MILLI_USD,
 	"gemini-1.5-flash":            0.075 * MILLI_USD,
 	"gemini-1.5-flash-001":        0.075 * MILLI_USD,
 	"gemini-1.5-flash-8b":         0.0375 * MILLI_USD,
+	"gemini-2.0-flash-exp":                0.075 * MILLI_USD,
+	"gemini-2.0-flash":                    0.15 * MILLI_USD,
+	"gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD,
+	"gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD,
+	"gemini-2.0-pro-exp-02-05":            1.25 * MILLI_USD,
 	"aqa": 1,
 	// https://open.bigmodel.cn/pricing
 	"glm-zero-preview": 0.01 * RMB,

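For orientation, the constants in this table map to per-token prices. A rough sketch under one assumption: a ratio of 1 * MILLI_USD corresponds to $0.001 per 1K tokens ($1 per 1M tokens), which is consistent with the gemini-1.5-pro entry against Google's published pricing. The helper below is illustrative only, not the repo's billing code, and the MILLI_USD constant here is normalized rather than the repo's actual value.

package main

import "fmt"

// MILLI_USD is normalized here so that a ratio of 1*MILLI_USD means
// $1 per 1M tokens; it only mirrors the relative scale of the table above.
const MILLI_USD = 1.0

// costUSD returns the dollar cost of `tokens` prompt tokens at a given ratio.
func costUSD(ratio float64, tokens int) float64 {
	return ratio / MILLI_USD * float64(tokens) / 1_000_000
}

func main() {
	// gemini-2.0-flash at 0.15 * MILLI_USD: 1M tokens cost $0.15.
	fmt.Printf("$%.4f\n", costUSD(0.15*MILLI_USD, 1_000_000))
	// gemini-2.0-pro-exp-02-05 at 1.25 * MILLI_USD: 200K tokens cost $0.25.
	fmt.Printf("$%.4f\n", costUSD(1.25*MILLI_USD, 200_000))
}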
View File

@@ -58,7 +58,7 @@ function renderType(type) {
     case 2:
       return (
         <Label basic color='olive'>
-          Consumption
+          Consumed
         </Label>
       );
     case 3:
@@ -564,7 +564,7 @@ const LogsTable = () => {
               {log.completion_tokens ? log.completion_tokens : ''}
             </Table.Cell>
             <Table.Cell>
-              {log.quota ? renderQuota(log.quota, t, 6) : ''}
+              {log.quota ? renderQuota(log.quota, t, 6) : 'free'}
             </Table.Cell>
           </>
         )}

View File

@@ -224,4 +224,4 @@ export function getChannelModels(type) {
     return channelModels[type];
   }
   return [];
-}
\ No newline at end of file
+}