Mirror of https://github.com/songquanpeng/one-api.git, last synced 2025-09-17 17:16:38 +08:00.
Merge pull request #18 from jinjunliu/main — feat: add support for the gemini-2.0-flash-thinking-exp model.
This change is contained in commit 00f5e25e57.
@ -24,9 +24,13 @@ func (a *Adaptor) Init(meta *meta.Meta) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
|
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
|
||||||
defaultVersion := config.GeminiVersion
|
var defaultVersion string
|
||||||
if meta.ActualModelName == "gemini-2.0-flash-exp" {
|
switch meta.ActualModelName {
|
||||||
|
case "gemini-2.0-flash-exp",
|
||||||
|
"gemini-2.0-flash-thinking-exp":
|
||||||
defaultVersion = "v1beta"
|
defaultVersion = "v1beta"
|
||||||
|
default:
|
||||||
|
defaultVersion = config.GeminiVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
version := helper.AssignOrDefault(meta.Config.APIVersion, defaultVersion)
|
version := helper.AssignOrDefault(meta.Config.APIVersion, defaultVersion)
|
||||||
|
// ModelList enumerates the Gemini model names this adaptor accepts.
// Reconstructed from the diff hunk "@ -6,5 +6,5": the scrape rendered each
// line twice (old/new diff columns) with separator noise; this is the
// post-merge declaration, which adds "gemini-2.0-flash-thinking-exp".
// NOTE(review): a 3-line-context hunk cannot show entries between the
// `var` line and the first visible entry — confirm none exist upstream.
var ModelList = []string{
	"gemini-pro", "gemini-1.0-pro",
	"gemini-1.5-flash", "gemini-1.5-pro",
	"text-embedding-004", "aqa",
	"gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp",
}
|
||||||
|
@ -54,6 +54,10 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
|
|||||||
Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
|
Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
|
||||||
Threshold: config.GeminiSafetySetting,
|
Threshold: config.GeminiSafetySetting,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Category: "HARM_CATEGORY_CIVIC_INTEGRITY",
|
||||||
|
Threshold: config.GeminiSafetySetting,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
GenerationConfig: ChatGenerationConfig{
|
GenerationConfig: ChatGenerationConfig{
|
||||||
Temperature: textRequest.Temperature,
|
Temperature: textRequest.Temperature,
|
||||||
@ -246,7 +250,14 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
|
|||||||
if candidate.Content.Parts[0].FunctionCall != nil {
|
if candidate.Content.Parts[0].FunctionCall != nil {
|
||||||
choice.Message.ToolCalls = getToolCalls(&candidate)
|
choice.Message.ToolCalls = getToolCalls(&candidate)
|
||||||
} else {
|
} else {
|
||||||
choice.Message.Content = candidate.Content.Parts[0].Text
|
var builder strings.Builder
|
||||||
|
for _, part := range candidate.Content.Parts {
|
||||||
|
if i > 0 {
|
||||||
|
builder.WriteString("\n")
|
||||||
|
}
|
||||||
|
builder.WriteString(part.Text)
|
||||||
|
}
|
||||||
|
choice.Message.Content = builder.String()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
choice.Message.Content = ""
|
choice.Message.Content = ""
|
||||||
|
@ -18,7 +18,7 @@ var ModelList = []string{
|
|||||||
"gemini-pro", "gemini-pro-vision",
|
"gemini-pro", "gemini-pro-vision",
|
||||||
"gemini-1.5-pro-001", "gemini-1.5-flash-001",
|
"gemini-1.5-pro-001", "gemini-1.5-flash-001",
|
||||||
"gemini-1.5-pro-002", "gemini-1.5-flash-002",
|
"gemini-1.5-pro-002", "gemini-1.5-flash-002",
|
||||||
"gemini-2.0-flash-exp",
|
"gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp",
|
||||||
}
|
}
|
||||||
|
|
||||||
type Adaptor struct {
|
type Adaptor struct {
|
||||||
|
@ -117,6 +117,7 @@ var ModelRatio = map[string]float64{
|
|||||||
"gemini-1.5-flash": 1,
|
"gemini-1.5-flash": 1,
|
||||||
"gemini-1.5-flash-001": 1,
|
"gemini-1.5-flash-001": 1,
|
||||||
"gemini-2.0-flash-exp": 1,
|
"gemini-2.0-flash-exp": 1,
|
||||||
|
"gemini-2.0-flash-thinking-exp": 1,
|
||||||
"aqa": 1,
|
"aqa": 1,
|
||||||
// https://open.bigmodel.cn/pricing
|
// https://open.bigmodel.cn/pricing
|
||||||
"glm-4": 0.1 * RMB,
|
"glm-4": 0.1 * RMB,
|
||||||
|
Loading…
Reference in New Issue
Block a user