Merge branch 'main' into doubao-cache

Author: Laoshancun
Date: 2025-02-08 14:04:26 +08:00 (committed via GitHub)
107 changed files with 7220 additions and 3616 deletions

View File

@@ -4,6 +4,7 @@ var ModelList = []string{
    "claude-instant-1.2", "claude-2.0", "claude-2.1",
    "claude-3-haiku-20240307",
    "claude-3-5-haiku-20241022",
+   "claude-3-5-haiku-latest",
    "claude-3-sonnet-20240229",
    "claude-3-opus-20240229",
    "claude-3-5-sonnet-20240620",

View File

@@ -2,5 +2,5 @@ package deepseek
var ModelList = []string{
    "deepseek-chat",
-   "deepseek-coder",
+   "deepseek-reasoner",
}

View File

@@ -7,7 +7,7 @@ import (
    "net/http"

    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/config"
    "github.com/songquanpeng/one-api/common/helper"
    channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
    "github.com/songquanpeng/one-api/relay/adaptor/openai"
@@ -24,8 +24,13 @@ func (a *Adaptor) Init(meta *meta.Meta) {
}

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-   defaultVersion := config.GeminiVersion
-   if meta.ActualModelName == "gemini-2.0-flash-exp" {
+   var defaultVersion string
+   switch meta.ActualModelName {
+   case "gemini-2.0-flash-exp",
+       "gemini-2.0-flash-thinking-exp",
+       "gemini-2.0-flash-thinking-exp-01-21":
        defaultVersion = "v1beta"
+   default:
+       defaultVersion = config.GeminiVersion
    }
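For orientation, a minimal sketch of what the resolved defaultVersion means downstream; the URL layout is assumed from the Gemini API's documented pattern, not taken from this diff:

package main

import "fmt"

// Sketch only: the experimental 2.0 models are pinned to the v1beta API
// surface, and the resolved version segment lands in the request URL
// roughly like so.
func main() {
	version := "v1beta" // chosen by the switch above for the 2.0 experimental models
	model := "gemini-2.0-flash-thinking-exp-01-21"
	url := fmt.Sprintf("https://generativelanguage.googleapis.com/%s/models/%s:generateContent", version, model)
	fmt.Println(url)
}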

View File

@@ -7,5 +7,5 @@ var ModelList = []string{
    "gemini-1.5-flash", "gemini-1.5-pro",
    "text-embedding-004", "aqa",
    "gemini-2.0-flash-exp",
-   "gemini-2.0-flash-thinking-exp",
+   "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
}

View File

@@ -3,7 +3,6 @@ package groq
// https://console.groq.com/docs/models
var ModelList = []string{
-   "gemma-7b-it",
    "gemma2-9b-it",
    "llama-3.1-70b-versatile",
    "llama-3.1-8b-instant",
@@ -11,7 +10,6 @@ var ModelList = []string{
    "llama-3.2-11b-vision-preview",
    "llama-3.2-1b-preview",
    "llama-3.2-3b-preview",
-   "llama-3.2-11b-vision-preview",
    "llama-3.2-90b-text-preview",
    "llama-3.2-90b-vision-preview",
    "llama-guard-3-8b",
@@ -24,4 +22,6 @@ var ModelList = []string{
    "distil-whisper-large-v3-en",
    "whisper-large-v3",
    "whisper-large-v3-turbo",
+   "deepseek-r1-distill-llama-70b-specdec",
+   "deepseek-r1-distill-llama-70b",
}

View File

@@ -3,14 +3,16 @@ package openai
import (
    "errors"
    "fmt"
+   "math"
+   "strings"

    "github.com/pkoukk/tiktoken-go"
    "github.com/songquanpeng/one-api/common/config"
    "github.com/songquanpeng/one-api/common/image"
    "github.com/songquanpeng/one-api/common/logger"
    billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
    "github.com/songquanpeng/one-api/relay/model"
-   "math"
-   "strings"
)

// tokenEncoderMap won't grow after initialization
@@ -21,7 +23,8 @@ func InitTokenEncoders() {
    logger.SysLog("initializing token encoders")
    gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
    if err != nil {
-       logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
+       logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s; "+
+           "if you are running in an offline environment, set TIKTOKEN_CACHE_DIR to use existing files; see https://stackoverflow.com/questions/76106366/how-to-use-tiktoken-in-offline-mode-computer for more information", err.Error()))
    }
    defaultTokenEncoder = gpt35TokenEncoder
    gpt4oTokenEncoder, err := tiktoken.EncodingForModel("gpt-4o")
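The expanded fatal-log message points operators at TIKTOKEN_CACHE_DIR. A hedged sketch of the offline setup it suggests (the cache path is hypothetical, and tiktoken-go is assumed to honor this variable as the linked answer describes):

package main

import "os"

// Sketch: pre-populate a local cache so tiktoken-go never needs network
// access, then set TIKTOKEN_CACHE_DIR before any encoder is created.
func main() {
	// /opt/tiktoken-cache is a placeholder path; it must already contain
	// the downloaded encoder files (cached under hashed filenames).
	os.Setenv("TIKTOKEN_CACHE_DIR", "/opt/tiktoken-cache")
	// ...then proceed with InitTokenEncoders() as usual.
}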

View File

@@ -2,16 +2,19 @@ package tencent
import (
    "errors"
+   "io"
+   "net/http"
+   "strconv"
+   "strings"

    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/helper"
    "github.com/songquanpeng/one-api/relay/adaptor"
    "github.com/songquanpeng/one-api/relay/adaptor/openai"
    "github.com/songquanpeng/one-api/relay/meta"
    "github.com/songquanpeng/one-api/relay/model"
-   "io"
-   "net/http"
-   "strconv"
-   "strings"
+   "github.com/songquanpeng/one-api/relay/relaymode"
)

// https://cloud.tencent.com/document/api/1729/101837
@@ -52,10 +55,18 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
    if err != nil {
        return nil, err
    }
-   tencentRequest := ConvertRequest(*request)
+   var convertedRequest any
+   switch relayMode {
+   case relaymode.Embeddings:
+       a.Action = "GetEmbedding"
+       convertedRequest = ConvertEmbeddingRequest(*request)
+   default:
+       a.Action = "ChatCompletions"
+       convertedRequest = ConvertRequest(*request)
+   }
    // we have to calculate the sign here
-   a.Sign = GetSign(*tencentRequest, a, secretId, secretKey)
-   return tencentRequest, nil
+   a.Sign = GetSign(convertedRequest, a, secretId, secretKey)
+   return convertedRequest, nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
@@ -75,7 +86,12 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
        err, responseText = StreamHandler(c, resp)
        usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
    } else {
-       err, usage = Handler(c, resp)
+       switch meta.Mode {
+       case relaymode.Embeddings:
+           err, usage = EmbeddingHandler(c, resp)
+       default:
+           err, usage = Handler(c, resp)
+       }
    }
    return
}
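To make the new embeddings branch concrete, a small sketch of the conversion it performs: an OpenAI-style embeddings input becomes Tencent's InputList. The struct is mirrored from this commit; ParseInput's normalization of string-or-array input is assumed and stubbed here.

package main

import (
	"encoding/json"
	"fmt"
)

// EmbeddingRequest mirrors the type added in this commit.
type EmbeddingRequest struct {
	InputList []string `json:"InputList"`
}

func main() {
	// ParseInput() on the OpenAI request normalizes "input" (string or
	// []string) into a []string; we stand in for it here.
	inputs := []string{"hello", "world"}
	body, _ := json.Marshal(EmbeddingRequest{InputList: inputs})
	fmt.Println(string(body)) // {"InputList":["hello","world"]}
}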

View File

@@ -6,4 +6,5 @@ var ModelList = []string{
    "hunyuan-standard-256K",
    "hunyuan-pro",
    "hunyuan-vision",
+   "hunyuan-embedding",
}

View File

@@ -8,7 +8,6 @@ import (
    "encoding/json"
    "errors"
    "fmt"
-   "github.com/songquanpeng/one-api/common/render"
    "io"
    "net/http"
    "strconv"
@@ -16,11 +15,14 @@ import (
    "time"

    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common"
    "github.com/songquanpeng/one-api/common/conv"
+   "github.com/songquanpeng/one-api/common/ctxkey"
    "github.com/songquanpeng/one-api/common/helper"
    "github.com/songquanpeng/one-api/common/logger"
    "github.com/songquanpeng/one-api/common/random"
+   "github.com/songquanpeng/one-api/common/render"
    "github.com/songquanpeng/one-api/relay/adaptor/openai"
    "github.com/songquanpeng/one-api/relay/constant"
    "github.com/songquanpeng/one-api/relay/model"
@@ -44,8 +46,68 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
    }
}

+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
+   return &EmbeddingRequest{
+       InputList: request.ParseInput(),
+   }
+}
+
+func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
+   var tencentResponseP EmbeddingResponseP
+   err := json.NewDecoder(resp.Body).Decode(&tencentResponseP)
+   if err != nil {
+       return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+   }
+   err = resp.Body.Close()
+   if err != nil {
+       return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+   }
+   tencentResponse := tencentResponseP.Response
+   if tencentResponse.Error.Code != "" {
+       return &model.ErrorWithStatusCode{
+           Error: model.Error{
+               Message: tencentResponse.Error.Message,
+               Code:    tencentResponse.Error.Code,
+           },
+           StatusCode: resp.StatusCode,
+       }, nil
+   }
+   requestModel := c.GetString(ctxkey.RequestModel)
+   fullTextResponse := embeddingResponseTencent2OpenAI(&tencentResponse)
+   fullTextResponse.Model = requestModel
+   jsonResponse, err := json.Marshal(fullTextResponse)
+   if err != nil {
+       return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+   }
+   c.Writer.Header().Set("Content-Type", "application/json")
+   c.Writer.WriteHeader(resp.StatusCode)
+   _, err = c.Writer.Write(jsonResponse)
+   return nil, &fullTextResponse.Usage
+}
+
+func embeddingResponseTencent2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse {
+   openAIEmbeddingResponse := openai.EmbeddingResponse{
+       Object: "list",
+       Data:   make([]openai.EmbeddingResponseItem, 0, len(response.Data)),
+       Model:  "hunyuan-embedding",
+       Usage:  model.Usage{TotalTokens: response.EmbeddingUsage.TotalTokens},
+   }
+   for _, item := range response.Data {
+       openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
+           Object:    item.Object,
+           Index:     item.Index,
+           Embedding: item.Embedding,
+       })
+   }
+   return &openAIEmbeddingResponse
+}
+
func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
    fullTextResponse := openai.TextResponse{
        Id:      response.ReqID,
        Object:  "chat.completion",
        Created: helper.GetTimestamp(),
        Usage: model.Usage{
@@ -148,7 +210,7 @@ func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *
        return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
    }
    TencentResponse = responseP.Response
-   if TencentResponse.Error.Code != 0 {
+   if TencentResponse.Error.Code != "" {
        return &model.ErrorWithStatusCode{
            Error: model.Error{
                Message: TencentResponse.Error.Message,
@@ -195,7 +257,7 @@ func hmacSha256(s, key string) string {
    return string(hashed.Sum(nil))
}

-func GetSign(req ChatRequest, adaptor *Adaptor, secId, secKey string) string {
+func GetSign(req any, adaptor *Adaptor, secId, secKey string) string {
    // build canonical request string
    host := "hunyuan.tencentcloudapi.com"
    httpRequestMethod := "POST"
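Loosening GetSign to req any works because the TC3-HMAC-SHA256 scheme signs the JSON-encoded body, so any marshalable request (chat or embedding) can be hashed the same way. A simplified sketch of just that step; the real canonical request also covers the method, headers, and credential scope:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// hashedPayload shows the part of TC3 signing that motivates `req any`:
// only the marshaled body feeds the payload hash.
func hashedPayload(req any) string {
	body, _ := json.Marshal(req)
	sum := sha256.Sum256(body)
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(hashedPayload(map[string]any{"InputList": []string{"hi"}}))
}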

View File

@@ -35,16 +35,16 @@ type ChatRequest struct {
    // 1. Affects the diversity of the output text; the higher the value, the more diverse the generated text.
    // 2. Valid range is [0.0, 1.0]; if omitted, each model's recommended value is used.
    // 3. Avoid setting this unless necessary, as unreasonable values degrade output quality.
-   TopP *float64 `json:"TopP"`
+   TopP *float64 `json:"TopP,omitempty"`
    // Notes:
    // 1. Higher values make the output more random; lower values make it more focused and deterministic.
    // 2. Valid range is [0.0, 2.0]; if omitted, each model's recommended value is used.
    // 3. Avoid setting this unless necessary, as unreasonable values degrade output quality.
-   Temperature *float64 `json:"Temperature"`
+   Temperature *float64 `json:"Temperature,omitempty"`
}

type Error struct {
-   Code    int    `json:"Code"`
+   Code    string `json:"Code"`
    Message string `json:"Message"`
}
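The int-to-string change matches Tencent Cloud's v3 APIs, which return symbolic error codes such as "InvalidParameter" rather than numbers. A hedged round-trip check (the sample body is hand-written, not a captured response):

package main

import (
	"encoding/json"
	"fmt"
)

// Error mirrors the struct above after this change.
type Error struct {
	Code    string `json:"Code"`
	Message string `json:"Message"`
}

func main() {
	raw := `{"Code":"InvalidParameter","Message":"bad request"}` // representative v3-style body
	var e Error
	if err := json.Unmarshal([]byte(raw), &e); err != nil {
		panic(err) // the old int-typed Code would fail right here
	}
	fmt.Println(e.Code != "") // true => surface it as an API error
}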
@@ -61,15 +61,41 @@ type ResponseChoices struct {
}

type ChatResponse struct {
-   Choices []ResponseChoices `json:"Choices,omitempty"` // results
-   Created int64             `json:"Created,omitempty"` // Unix timestamp as a string
-   Id      string            `json:"Id,omitempty"`      // conversation id
-   Usage   Usage             `json:"Usage,omitempty"`   // token counts
-   Error   Error             `json:"Error,omitempty"`   // error info; note: this field may be null, meaning no valid value could be obtained
-   Note    string            `json:"Note,omitempty"`    // note
-   ReqID   string            `json:"Req_id,omitempty"`  // unique request id, returned with every request; include it when reporting API issues
+   Choices []ResponseChoices `json:"Choices,omitempty"`   // results
+   Created int64             `json:"Created,omitempty"`   // Unix timestamp as a string
+   Id      string            `json:"Id,omitempty"`        // conversation id
+   Usage   Usage             `json:"Usage,omitempty"`     // token counts
+   Error   Error             `json:"Error,omitempty"`     // error info; note: this field may be null, meaning no valid value could be obtained
+   Note    string            `json:"Note,omitempty"`      // note
+   ReqID   string            `json:"RequestId,omitempty"` // unique request id, returned with every request; include it when reporting API issues
}

type ChatResponseP struct {
    Response ChatResponse `json:"Response,omitempty"`
}

+type EmbeddingRequest struct {
+   InputList []string `json:"InputList"`
+}
+
+type EmbeddingData struct {
+   Embedding []float64 `json:"Embedding"`
+   Index     int       `json:"Index"`
+   Object    string    `json:"Object"`
+}
+
+type EmbeddingUsage struct {
+   PromptTokens int `json:"PromptTokens"`
+   TotalTokens  int `json:"TotalTokens"`
+}
+
+type EmbeddingResponse struct {
+   Data           []EmbeddingData `json:"Data"`
+   EmbeddingUsage EmbeddingUsage  `json:"Usage,omitempty"`
+   RequestId      string          `json:"RequestId,omitempty"`
+   Error          Error           `json:"Error,omitempty"`
+}
+
+type EmbeddingResponseP struct {
+   Response EmbeddingResponse `json:"Response,omitempty"`
+}
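These types decode Tencent's enveloped response: the payload sits under a top-level Response key, which is what the *P wrapper types peel off. A self-contained sketch with a hand-written (not captured) body:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the structs above, enough to show the envelope.
type EmbeddingData struct {
	Embedding []float64 `json:"Embedding"`
	Index     int       `json:"Index"`
	Object    string    `json:"Object"`
}

type EmbeddingResponse struct {
	Data      []EmbeddingData `json:"Data"`
	RequestId string          `json:"RequestId,omitempty"`
}

type EmbeddingResponseP struct {
	Response EmbeddingResponse `json:"Response,omitempty"`
}

func main() {
	raw := `{"Response":{"RequestId":"abc-123","Data":[{"Embedding":[0.1,0.2],"Index":0,"Object":"embedding"}]}}`
	var p EmbeddingResponseP
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	fmt.Println(len(p.Response.Data), p.Response.RequestId) // 1 abc-123
}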

View File

@@ -18,7 +18,8 @@ var ModelList = []string{
    "gemini-pro", "gemini-pro-vision",
    "gemini-1.5-pro-001", "gemini-1.5-flash-001",
    "gemini-1.5-pro-002", "gemini-1.5-flash-002",
-   "gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp",
+   "gemini-2.0-flash-exp",
+   "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
}
type Adaptor struct {

View File

@@ -1,7 +1,14 @@
package zhipu

// https://open.bigmodel.cn/pricing

var ModelList = []string{
    "chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite",
-   "glm-4", "glm-4v", "glm-3-turbo", "embedding-2",
-   "cogview-3",
+   "glm-zero-preview", "glm-4-plus", "glm-4-0520", "glm-4-airx",
+   "glm-4-air", "glm-4-long", "glm-4-flashx", "glm-4-flash",
+   "glm-4", "glm-3-turbo",
+   "glm-4v-plus", "glm-4v", "glm-4v-flash",
+   "cogview-3-plus", "cogview-3", "cogview-3-flash",
+   "cogvideox", "cogvideox-flash",
+   "charglm-4", "emohaa", "codegeex-4",
+   "embedding-2", "embedding-3",
}