package vertexai

import (
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/relay/adaptor/gemini"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

// ModelList is the set of Gemini model names this Vertex AI adaptor exposes.
var ModelList = []string{
	"gemini-pro", "gemini-pro-vision",
	"gemini-exp-1206",
	"gemini-1.5-pro-001", "gemini-1.5-pro-002",
	"gemini-1.5-flash-001", "gemini-1.5-flash-002",
	"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
	"gemini-2.0-flash-lite-preview-02-05",
	"gemini-2.0-flash-thinking-exp-01-21",
}

// Adaptor adapts OpenAI-style relay requests to the Gemini API on Vertex AI.
type Adaptor struct{}

// parseGeminiChatGenerationThinking splits a model name of the form
// "model?thinking=1&thinking_budget=N" into the bare model name and a
// gemini.ThinkingConfig derived from the query-style options. A model name
// without a "?" suffix is returned unchanged, with thinking disabled.
func (a *Adaptor) parseGeminiChatGenerationThinking(model string) (string, *gemini.ThinkingConfig) {
	thinkingConfig := &gemini.ThinkingConfig{
		IncludeThoughts: false,
		ThinkingBudget:  0,
	}
	modelName := model
	if strings.Contains(model, "?") {
		parts := strings.Split(model, "?")
		baseModelName := parts[0]
		if len(parts) >= 2 {
			modelOptions, err := url.ParseQuery(parts[1])
			if err == nil && modelOptions != nil {
				modelName = baseModelName
				// "thinking=1" turns thought output on; any other value turns it off.
				if modelOptions.Has("thinking") {
					thinkingConfig.IncludeThoughts = modelOptions.Get("thinking") == "1"
				}
				// "thinking_budget" is applied only when it parses as an integer.
				if thinkingBudget := modelOptions.Get("thinking_budget"); thinkingBudget != "" {
					if thinkingBudgetInt, err := strconv.Atoi(thinkingBudget); err == nil {
						thinkingConfig.ThinkingBudget = thinkingBudgetInt
					}
				}
			}
		}
	}
	return modelName, thinkingConfig
}
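
// A minimal usage sketch of the suffix parsing (the model name below is
// illustrative, not a guaranteed Vertex AI model):
//
//	a := &Adaptor{}
//	name, cfg := a.parseGeminiChatGenerationThinking("gemini-2.0-flash-001?thinking=1&thinking_budget=1024")
//	// name == "gemini-2.0-flash-001"
//	// cfg.IncludeThoughts == true, cfg.ThinkingBudget == 1024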

// ConvertRequest rewrites an OpenAI-style request into the Gemini format,
// stripping any "?thinking=..." suffix from the model name and attaching the
// parsed ThinkingConfig to the generation config. The resolved model name and
// converted request are also stored on the gin context for later handlers.
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	modelName, thinkingConfig := a.parseGeminiChatGenerationThinking(request.Model)
	request.Model = modelName
	geminiRequest := gemini.ConvertRequest(*request)
	if thinkingConfig != nil {
		geminiRequest.GenerationConfig.ThinkingConfig = thinkingConfig
	}
	c.Set(ctxkey.RequestModel, modelName)
	c.Set(ctxkey.ConvertedRequest, geminiRequest)
	return geminiRequest, nil
}
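
// Sketch of the expected call from the relay layer (assuming the usual
// relaymode constant for chat; the model string is illustrative):
//
//	req := &model.GeneralOpenAIRequest{Model: "gemini-1.5-pro-002?thinking=1"}
//	converted, err := (&Adaptor{}).ConvertRequest(c, relaymode.ChatCompletions, req)
//	// converted is the Gemini-format request with ThinkingConfig populated;
//	// req.Model has been rewritten to "gemini-1.5-pro-002".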

// DoResponse dispatches the upstream response to the matching Gemini handler.
// For streams, usage is reconstructed from the concatenated response text;
// otherwise the handler extracts usage from the response body.
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		var responseText string
		err, responseText = gemini.StreamHandler(c, resp)
		usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
	} else {
		switch meta.Mode {
		case relaymode.Embeddings:
			err, usage = gemini.EmbeddingHandler(c, resp)
		default:
			err, usage = gemini.Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
		}
	}
	return
}
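
// For the streaming path, token accounting is a client-side estimate, e.g.
// (counts are illustrative):
//
//	usage := openai.ResponseText2Usage("Hello!", "gemini-1.5-pro-002", 10)
//	// usage.PromptTokens == 10; usage.CompletionTokens is counted from the
//	// response text, and usage.TotalTokens is their sum.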