mirror of https://github.com/songquanpeng/one-api.git
synced 2026-02-19 12:24:25 +08:00

Compare commits

4 Commits: 80dbb3e762 ... 48e0f2f244
| Author | SHA1 | Date |
|---|---|---|
|  | 48e0f2f244 |  |
|  | 761ee32d19 |  |
|  | c426b64b3d |  |
|  | 8df4a2670b |  |
```diff
@@ -72,7 +72,7 @@ _✨ Access all large models through the standard OpenAI API format, works out of the box
    + [x] [Anthropic Claude series models](https://anthropic.com) (AWS Claude supported)
    + [x] [Google PaLM2/Gemini series models](https://developers.generativeai.google)
    + [x] [Mistral series models](https://mistral.ai/)
-   + [x] [ByteDance Doubao models](https://console.volcengine.com/ark/region:ark+cn-beijing/model)
+   + [x] [ByteDance Doubao models (Volcano Engine)](https://www.volcengine.com/experience/ark?utm_term=202502dsinvite&ac=DSASUQY5&rc=2QXCA1VI)
    + [x] [Baidu Wenxin Yiyan (ERNIE) series models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
    + [x] [Alibaba Tongyi Qianwen (Qwen) series models](https://help.aliyun.com/document_detail/2400395.html)
    + [x] [iFlytek Spark cognitive models](https://www.xfyun.cn/doc/spark/Web.html)
```
```diff
@@ -8,19 +8,18 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/songquanpeng/one-api/common/render"
-
-	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/common/random"
+	"github.com/songquanpeng/one-api/common/render"
+	"github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/model"
 
+	"github.com/gin-gonic/gin"
 )
 
 // https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn
@@ -61,12 +60,10 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 			},
 		},
 		GenerationConfig: ChatGenerationConfig{
-			Temperature:     textRequest.Temperature,
-			TopP:            textRequest.TopP,
-			MaxOutputTokens: textRequest.MaxTokens,
-			ResponseModalities: []string{
-				"TEXT", "IMAGE",
-			},
+			Temperature:        textRequest.Temperature,
+			TopP:               textRequest.TopP,
+			MaxOutputTokens:    textRequest.MaxTokens,
+			ResponseModalities: geminiv2.GetModelModalities(textRequest.Model),
 		},
 	}
 	if textRequest.ResponseFormat != nil {
```
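The hunk above replaces the hard-coded `["TEXT", "IMAGE"]` response modalities with a per-model lookup. Below is a minimal, self-contained Go sketch of that behavior; `openAIRequest`, `generationConfig`, `modalitiesFor`, and `convert` are simplified stand-ins for the adaptor's real types, not the repository's code.

```go
package main

import (
	"fmt"
	"strings"
)

// Trimmed stand-ins for the adaptor's request/config types (illustrative only).
type openAIRequest struct {
	Model       string
	Temperature *float64
	MaxTokens   int
}

type generationConfig struct {
	Temperature        *float64
	MaxOutputTokens    int
	ResponseModalities []string
}

// modalitiesFor mirrors the logic of geminiv2.GetModelModalities (next hunk).
func modalitiesFor(model string) []string {
	if strings.Contains(model, "-image-generation") {
		return []string{"TEXT", "IMAGE"}
	}
	return []string{"TEXT"}
}

// convert sketches the GenerationConfig part of ConvertRequest after this change:
// scalar options pass through unchanged, modalities depend on the model name.
func convert(req openAIRequest) generationConfig {
	return generationConfig{
		Temperature:        req.Temperature,
		MaxOutputTokens:    req.MaxTokens,
		ResponseModalities: modalitiesFor(req.Model),
	}
}

func main() {
	temp := 0.7
	for _, m := range []string{"gemini-2.0-flash-exp-image-generation", "gemini-2.0-pro-exp-02-05"} {
		cfg := convert(openAIRequest{Model: m, Temperature: &temp, MaxTokens: 1024})
		fmt.Printf("%-40s -> modalities %v\n", m, cfg.ResponseModalities)
	}
}
```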
```diff
@@ -1,5 +1,7 @@
 package geminiv2
 
+import "strings"
+
 // https://ai.google.dev/models/gemini
 
 var ModelList = []string{
@@ -14,3 +16,17 @@ var ModelList = []string{
 	"gemini-2.0-pro-exp-02-05",
 	"gemini-2.0-flash-exp-image-generation",
 }
+
+const (
+	ModalityText  = "TEXT"
+	ModalityImage = "IMAGE"
+)
+
+// GetModelModalities returns the modalities of the model.
+func GetModelModalities(model string) []string {
+	if strings.Contains(model, "-image-generation") {
+		return []string{ModalityText, ModalityImage}
+	}
+
+	return []string{ModalityText}
+}
```
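For reference, a hypothetical table-driven test exercising `GetModelModalities` as defined in the hunk above; the test is not part of this change, and its placement in package `geminiv2` is an assumption.

```go
package geminiv2

import (
	"reflect"
	"testing"
)

// TestGetModelModalities checks that only "-image-generation" models request IMAGE output.
func TestGetModelModalities(t *testing.T) {
	cases := []struct {
		model string
		want  []string
	}{
		{"gemini-2.0-flash-exp-image-generation", []string{ModalityText, ModalityImage}},
		{"gemini-2.0-pro-exp-02-05", []string{ModalityText}},
		{"gemini-1.5-flash", []string{ModalityText}},
	}
	for _, c := range cases {
		if got := GetModelModalities(c.model); !reflect.DeepEqual(got, c.want) {
			t.Errorf("GetModelModalities(%q) = %v, want %v", c.model, got, c.want)
		}
	}
}
```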
```diff
@@ -40,7 +40,7 @@ type GeneralOpenAIRequest struct {
 	MaxCompletionTokens *int `json:"max_completion_tokens,omitempty"`
 	// N is how many chat completion choices to generate for each input message,
 	// default to 1.
-	N *int `json:"n,omitempty" binding:"omitempty,min=1"`
+	N *int `json:"n,omitempty" binding:"omitempty,min=0"`
 	// ReasoningEffort constrains effort on reasoning for reasoning models, reasoning models only.
 	ReasoningEffort *string `json:"reasoning_effort,omitempty" binding:"omitempty,oneof=low medium high"`
 	// Modalities currently the model only programmatically allows modalities = [“text”, “audio”]
```
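The only change in this hunk relaxes the validator tag on `N` from `min=1` to `min=0`, so a request body with `"n": 0` should no longer be rejected at binding time, while negative values still fail. A hedged sketch of that effect with gin's `ShouldBindJSON` follows; the `chatRequest` struct and route are illustrative, not the project's actual handler.

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// chatRequest is an illustrative stand-in; only the N field and its binding tag matter here.
type chatRequest struct {
	Model string `json:"model"`
	N     *int   `json:"n,omitempty" binding:"omitempty,min=0"`
}

func main() {
	r := gin.Default()
	r.POST("/v1/chat/completions", func(c *gin.Context) {
		var req chatRequest
		// With min=0, a body of {"n": 0} binds successfully; with the old min=1 it
		// produced a validation error. Negative n values are still rejected.
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"model": req.Model, "n": req.N})
	})
	_ = r.Run(":8080") // e.g. curl -d '{"model":"gpt-4o","n":0}' localhost:8080/v1/chat/completions
}
```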