Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-11 19:03:43 +08:00)

Compare commits: v0.6.6-alp...v0.6.6-alp (12 commits)
Commits in this range (SHA1):

- 2720e1a358
- 71f4403fd5
- 1f76c80553
- 7e027d2bd0
- 30f373b623
- 1c2654320e
- 6cffb116b7
- a84c7b38b7
- 1bd14af47b
- 6170b91d1c
- 04b49aa0ec
- ef88497f25
@@ -87,6 +87,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
    + [x] [DeepSeek](https://www.deepseek.com/)
    + [x] [Cloudflare Workers AI](https://developers.cloudflare.com/workers-ai/)
    + [x] [DeepL](https://www.deepl.com/)
+   + [x] [together.ai](https://www.together.ai/)
 2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
 3. 支持通过**负载均衡**的方式访问多个渠道。
 4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
@@ -5,6 +5,15 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/logger"
@@ -18,14 +27,6 @@ import (
 	"github.com/songquanpeng/one-api/relay/meta"
 	relaymodel "github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/relaymode"
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"net/url"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
 
 	"github.com/gin-gonic/gin"
 )
@@ -69,6 +70,7 @@ func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error
 	adaptor.Init(meta)
 	var modelName string
 	modelList := adaptor.GetModelList()
+	modelMap := channel.GetModelMapping()
 	if len(modelList) != 0 {
 		modelName = modelList[0]
 	}
@@ -77,6 +79,9 @@ func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error
 		if len(modelNames) > 0 {
 			modelName = modelNames[0]
 		}
+		if modelMap != nil && modelMap[modelName] != "" {
+			modelName = modelMap[modelName]
+		}
 	}
 	request := buildTestRequest()
 	request.Model = modelName
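Note on the hunk above: the channel test endpoint now respects the channel's model mapping, so the probe request is sent with the upstream model name rather than the public alias. A minimal standalone sketch of the same lookup; the mapping and model names are illustrative, not taken from the repo:

    package main

    import "fmt"

    // applyModelMapping returns the name to send upstream when testing a channel.
    func applyModelMapping(modelName string, modelMap map[string]string) string {
        if modelMap != nil && modelMap[modelName] != "" {
            return modelMap[modelName]
        }
        return modelName
    }

    func main() {
        mapping := map[string]string{"gpt-3.5-turbo": "gpt-3.5-turbo-0125"} // hypothetical mapping
        fmt.Println(applyModelMapping("gpt-3.5-turbo", mapping)) // gpt-3.5-turbo-0125
        fmt.Println(applyModelMapping("gpt-4", mapping))         // unmapped, unchanged
    }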
@@ -4,6 +4,10 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -11,9 +15,6 @@ import (
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
-	"strings"
 )
 
 func stopReasonClaude2OpenAI(reason *string) string {
@@ -176,10 +177,10 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 			if len(data) < 6 {
 				continue
 			}
-			if !strings.HasPrefix(data, "data: ") {
+			if !strings.HasPrefix(data, "data:") {
 				continue
 			}
-			data = strings.TrimPrefix(data, "data: ")
+			data = strings.TrimPrefix(data, "data:")
 			dataChan <- data
 		}
 		stopChan <- true
@@ -192,7 +193,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		select {
 		case data := <-dataChan:
 			// some implementations may add \r at the end of data
-			data = strings.TrimSuffix(data, "\r")
+			data = strings.TrimSpace(data)
 			var claudeResponse StreamResponse
 			err := json.Unmarshal([]byte(data), &claudeResponse)
 			if err != nil {
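Note on the two changes above: the SSE format allows both "data: {...}" and "data:{...}", so matching on the bare "data:" field name and trimming whitespace afterwards (TrimSpace also removes a trailing \r) accepts either form. A small self-contained sketch of that normalization, with made-up payloads:

    package main

    import (
        "fmt"
        "strings"
    )

    // normalizeSSELine strips the SSE field name and surrounding whitespace,
    // accepting both "data: ..." and "data:..." variants.
    func normalizeSSELine(line string) (string, bool) {
        if !strings.HasPrefix(line, "data:") {
            return "", false
        }
        payload := strings.TrimPrefix(line, "data:")
        return strings.TrimSpace(payload), true // also drops a trailing \r
    }

    func main() {
        lines := []string{"data: {\"type\":\"ping\"}\r", "data:{\"type\":\"ping\"}", ": keep-alive comment"}
        for _, line := range lines {
            if payload, ok := normalizeSSELine(line); ok {
                fmt.Printf("%q\n", payload)
            }
        }
    }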
@@ -3,6 +3,9 @@ package gemini
 import (
 	"errors"
 	"fmt"
+	"io"
+	"net/http"
+
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -10,8 +13,6 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
 )
 
 type Adaptor struct {
@@ -25,7 +26,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	version := helper.AssignOrDefault(meta.Config.APIVersion, config.GeminiVersion)
 	action := "generateContent"
 	if meta.IsStream {
-		action = "streamGenerateContent"
+		action = "streamGenerateContent?alt=sse"
 	}
 	return fmt.Sprintf("%s/%s/models/%s:%s", meta.BaseURL, version, meta.ActualModelName, action), nil
 }
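Note on the URL change above: with alt=sse the Gemini streaming endpoint replies as server-sent events ("data: {...}" lines) instead of a pretty-printed JSON array, which is what the reworked StreamHandler below parses. A sketch of the resulting URL shape; the base URL, version, and model here are example values, not taken from this diff:

    package main

    import "fmt"

    func requestURL(baseURL, version, model string, stream bool) string {
        action := "generateContent"
        if stream {
            // alt=sse switches the response format from a JSON array to an SSE stream
            action = "streamGenerateContent?alt=sse"
        }
        return fmt.Sprintf("%s/%s/models/%s:%s", baseURL, version, model, action)
    }

    func main() {
        // example values only
        fmt.Println(requestURL("https://generativelanguage.googleapis.com", "v1beta", "gemini-pro", true))
    }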
@@ -232,8 +232,6 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *ChatResponse) *openai.ChatC
 
 func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
 	responseText := ""
-	dataChan := make(chan string)
-	stopChan := make(chan bool)
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -247,14 +245,16 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		}
 		return 0, nil, nil
 	})
+	dataChan := make(chan string)
+	stopChan := make(chan bool)
 	go func() {
 		for scanner.Scan() {
 			data := scanner.Text()
 			data = strings.TrimSpace(data)
-			if !strings.HasPrefix(data, "\"text\": \"") {
+			if !strings.HasPrefix(data, "data: ") {
 				continue
 			}
-			data = strings.TrimPrefix(data, "\"text\": \"")
+			data = strings.TrimPrefix(data, "data: ")
 			data = strings.TrimSuffix(data, "\"")
 			dataChan <- data
 		}
@@ -264,23 +264,17 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
-			// this is used to prevent annoying \ related format bug
-			data = fmt.Sprintf("{\"content\": \"%s\"}", data)
-			type dummyStruct struct {
-				Content string `json:"content"`
+			var geminiResponse ChatResponse
+			err := json.Unmarshal([]byte(data), &geminiResponse)
+			if err != nil {
+				logger.SysError("error unmarshalling stream response: " + err.Error())
+				return true
 			}
-			var dummy dummyStruct
-			err := json.Unmarshal([]byte(data), &dummy)
-			responseText += dummy.Content
-			var choice openai.ChatCompletionsStreamResponseChoice
-			choice.Delta.Content = dummy.Content
-			response := openai.ChatCompletionsStreamResponse{
-				Id:      fmt.Sprintf("chatcmpl-%s", random.GetUUID()),
-				Object:  "chat.completion.chunk",
-				Created: helper.GetTimestamp(),
-				Model:   "gemini-pro",
-				Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
+			response := streamResponseGeminiChat2OpenAI(&geminiResponse)
+			if response == nil {
+				return true
 			}
+			responseText += response.Choices[0].Delta.StringContent()
 			jsonResponse, err := json.Marshal(response)
 			if err != nil {
 				logger.SysError("error marshalling stream response: " + err.Error())
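Note on the hunks above: the old handler scraped "text": "..." fragments out of the pretty-printed array and rebuilt a fake JSON object around them; the new one unmarshals each SSE payload into ChatResponse and reuses streamResponseGeminiChat2OpenAI, so streamed chunks follow the same conversion path as non-streamed responses. A simplified, self-contained sketch of that decode-then-accumulate loop, using stand-in types rather than the repo's ChatResponse:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // chunk is a stand-in for the adaptor's ChatResponse type.
    type chunk struct {
        Candidates []struct {
            Content struct {
                Parts []struct {
                    Text string `json:"text"`
                } `json:"parts"`
            } `json:"content"`
        } `json:"candidates"`
    }

    func main() {
        lines := []string{ // made-up SSE payloads
            `data: {"candidates":[{"content":{"parts":[{"text":"Hel"}]}}]}`,
            `data: {"candidates":[{"content":{"parts":[{"text":"lo"}]}}]}`,
        }
        responseText := ""
        for _, line := range lines {
            if !strings.HasPrefix(line, "data: ") {
                continue
            }
            var c chunk
            if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &c); err != nil {
                continue // skip malformed chunks instead of patching escapes by hand
            }
            if len(c.Candidates) > 0 && len(c.Candidates[0].Content.Parts) > 0 {
                responseText += c.Candidates[0].Content.Parts[0].Text
            }
        }
        fmt.Println(responseText) // Hello
    }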
@@ -1,7 +1,11 @@
 package minimax
 
+// https://www.minimaxi.com/document/guides/chat-model/V2?id=65e0736ab2845de20908e2dd
+
 var ModelList = []string{
-	"abab5.5s-chat",
-	"abab5.5-chat",
+	"abab6.5-chat",
+	"abab6.5s-chat",
 	"abab6-chat",
+	"abab5.5-chat",
+	"abab5.5s-chat",
 }
@@ -13,6 +13,7 @@ import (
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
@@ -32,9 +33,22 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 		Stream: request.Stream,
 	}
 	for _, message := range request.Messages {
+		openaiContent := message.ParseContent()
+		var imageUrls []string
+		var contentText string
+		for _, part := range openaiContent {
+			switch part.Type {
+			case model.ContentTypeText:
+				contentText = part.Text
+			case model.ContentTypeImageURL:
+				_, data, _ := image.GetImageFromUrl(part.ImageURL.Url)
+				imageUrls = append(imageUrls, data)
+			}
+		}
 		ollamaRequest.Messages = append(ollamaRequest.Messages, Message{
 			Role:    message.Role,
-			Content: message.StringContent(),
+			Content: contentText,
+			Images:  imageUrls,
 		})
 	}
 	return &ollamaRequest
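Note on the hunk above: multimodal messages are now split before conversion, with the text parts kept in the message content and each image fetched and appended to an images list, matching the shape the Ollama chat API expects (plain content plus base64-encoded images). A rough sketch of the resulting message shape; the struct and payload below are illustrative stand-ins, not the adaptor's actual types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // message is an illustrative stand-in: text goes to Content, base64 images to Images.
    type message struct {
        Role    string   `json:"role"`
        Content string   `json:"content"`
        Images  []string `json:"images,omitempty"`
    }

    func main() {
        msg := message{
            Role:    "user",
            Content: "What is in this picture?",
            Images:  []string{"iVBORw0KGgo..."}, // truncated base64 stand-in
        }
        out, _ := json.MarshalIndent(msg, "", "  ")
        fmt.Println(string(out))
    }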
@@ -86,9 +86,13 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
 	if meta.IsStream {
 		var responseText string
 		err, responseText, usage = StreamHandler(c, resp, meta.Mode)
-		if usage == nil {
+		if usage == nil || usage.TotalTokens == 0 {
 			usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
 		}
+		if usage.TotalTokens != 0 && usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens
+			usage.PromptTokens = meta.PromptTokens
+			usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens
+		}
 	} else {
 		switch meta.Mode {
 		case relaymode.ImagesGenerations:
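Note on the hunk above: the widened guard covers upstreams that return a usage object that is all zeros (fall back to counting the streamed text), and the new block covers upstreams that report only total_tokens (derive the prompt/completion split from the request's prompt token count). A small sketch of that fallback arithmetic with invented numbers:

    package main

    import "fmt"

    type usage struct {
        PromptTokens     int
        CompletionTokens int
        TotalTokens      int
    }

    // fixUsage derives the split when only a total is reported upstream.
    func fixUsage(u usage, promptTokens int) usage {
        if u.TotalTokens != 0 && u.PromptTokens == 0 {
            u.PromptTokens = promptTokens
            u.CompletionTokens = u.TotalTokens - promptTokens
        }
        return u
    }

    func main() {
        fmt.Println(fixUsage(usage{TotalTokens: 120}, 80)) // {80 40 120}
    }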
@@ -10,6 +10,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
+	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )
 
@@ -24,6 +25,7 @@ var CompatibleChannels = []int{
 	channeltype.LingYiWanWu,
 	channeltype.StepFun,
 	channeltype.DeepSeek,
+	channeltype.TogetherAI,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -48,6 +50,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "stepfun", stepfun.ModelList
 	case channeltype.DeepSeek:
 		return "deepseek", deepseek.ModelList
+	case channeltype.TogetherAI:
+		return "together.ai", togetherai.ModelList
 	default:
 		return "openai", ModelList
 	}
relay/adaptor/togetherai/constants.go (new file, 10 lines added)
@@ -0,0 +1,10 @@
+package togetherai
+
+// https://docs.together.ai/docs/inference-models
+
+var ModelList = []string{
+	"meta-llama/Llama-3-70b-chat-hf",
+	"deepseek-ai/deepseek-coder-33b-instruct",
+	"mistralai/Mixtral-8x22B-Instruct-v0.1",
+	"Qwen/Qwen1.5-72B-Chat",
+}
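Note on the together.ai additions in this compare: wiring up an OpenAI-compatible upstream follows a fixed pattern, namely a new channeltype constant (39), a matching entry in ChannelBaseURLs, a default model list (this new file), a case in GetCompatibleChannelMeta, and entries in both frontend CHANNEL_OPTIONS lists further down. A toy sketch of the convention that the channel type value selects the base URL; the two entries are shortened stand-ins, not the real tables:

    package main

    import "fmt"

    // Channel types are small integers, so the enum and the base-URL table
    // must grow together; these two entries are illustrative only.
    const (
        DeepL      = 38
        TogetherAI = 39
    )

    var channelBaseURLs = map[int]string{
        DeepL:      "https://api-free.deepl.com",
        TogetherAI: "https://api.together.xyz",
    }

    func main() {
        fmt.Println(channelBaseURLs[TogetherAI]) // https://api.together.xyz
    }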
@@ -62,8 +62,8 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	}
 	switch relayMode {
 	case relaymode.Embeddings:
-		baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
-		return baiduEmbeddingRequest, nil
+		baiduEmbeddingRequest, err := ConvertEmbeddingRequest(*request)
+		return baiduEmbeddingRequest, err
 	default:
 		// TopP (0.0, 1.0)
 		request.TopP = math.Min(0.99, request.TopP)
@@ -129,11 +129,15 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
 	return
 }
 
-func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
-	return &EmbeddingRequest{
-		Model: "embedding-2",
-		Input: request.Input.(string),
+func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) (*EmbeddingRequest, error) {
+	inputs := request.ParseInput()
+	if len(inputs) != 1 {
+		return nil, errors.New("invalid input length, zhipu only support one input")
 	}
+	return &EmbeddingRequest{
+		Model: request.Model,
+		Input: inputs[0],
+	}, nil
 }
 
 func (a *Adaptor) GetModelList() []string {
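Note on the hunks above: the old code asserted request.Input.(string), which panics when a client sends the OpenAI array form of input, and it hard-coded the model name; the new version normalizes the input via ParseInput, passes the requested model through, and returns an error instead of crashing when there is not exactly one input. A simplified stand-in for that normalization (not the repo's ParseInput):

    package main

    import (
        "errors"
        "fmt"
    )

    // parseInput accepts the two OpenAI input shapes: a single string or a list of strings.
    func parseInput(input any) []string {
        switch v := input.(type) {
        case string:
            return []string{v}
        case []any:
            var out []string
            for _, item := range v {
                if s, ok := item.(string); ok {
                    out = append(out, s)
                }
            }
            return out
        }
        return nil
    }

    func embeddingInput(input any) (string, error) {
        inputs := parseInput(input)
        if len(inputs) != 1 {
            return "", errors.New("invalid input length, zhipu only support one input")
        }
        return inputs[0], nil
    }

    func main() {
        fmt.Println(embeddingInput("hello"))         // hello <nil>
        fmt.Println(embeddingInput([]any{"a", "b"})) // rejected: two inputs
    }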
@@ -138,6 +138,8 @@ var ModelRatio = map[string]float64{
 	"Baichuan2-Turbo-192k": 0.016 * RMB,
 	"Baichuan2-53B":        0.02 * RMB,
 	// https://api.minimax.chat/document/price
+	"abab6.5-chat":  0.03 * RMB,
+	"abab6.5s-chat": 0.01 * RMB,
 	"abab6-chat":    0.1 * RMB,
 	"abab5.5-chat":  0.015 * RMB,
 	"abab5.5s-chat": 0.005 * RMB,
@@ -40,6 +40,7 @@ const (
 	DeepSeek
 	Cloudflare
 	DeepL
+	TogetherAI
 
 	Dummy
 )
@@ -40,6 +40,7 @@ var ChannelBaseURLs = []string{
 	"https://api.deepseek.com",   // 36
 	"https://api.cloudflare.com", // 37
 	"https://api-free.deepl.com", // 38
+	"https://api.together.xyz",   // 39
 }
 
 func init() {
@@ -53,6 +53,16 @@ func (e GeneralErrorResponse) ToMessage() string {
 }
 
 func RelayErrorHandler(resp *http.Response) (ErrorWithStatusCode *model.ErrorWithStatusCode) {
+	if resp == nil {
+		return &model.ErrorWithStatusCode{
+			StatusCode: 500,
+			Error: model.Error{
+				Message: "resp is nil",
+				Type:    "upstream_error",
+				Code:    "bad_response",
+			},
+		}
+	}
 	ErrorWithStatusCode = &model.ErrorWithStatusCode{
 		StatusCode: resp.StatusCode,
 		Error: model.Error{
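Note on the hunk above: when the upstream request fails at the transport level there is no response object at all, and the old code would dereference nil on resp.StatusCode; returning a synthetic 500 keeps the error path total. A minimal sketch of the guard-then-wrap shape with simplified types:

    package main

    import (
        "fmt"
        "net/http"
    )

    type relayError struct {
        StatusCode int
        Message    string
    }

    func relayErrorFrom(resp *http.Response) relayError {
        if resp == nil {
            // upstream never answered; report a synthetic 500 instead of dereferencing nil
            return relayError{StatusCode: 500, Message: "resp is nil"}
        }
        return relayError{StatusCode: resp.StatusCode, Message: http.StatusText(resp.StatusCode)}
    }

    func main() {
        fmt.Println(relayErrorFrom(nil))                             // {500 resp is nil}
        fmt.Println(relayErrorFrom(&http.Response{StatusCode: 502})) // {502 Bad Gateway}
    }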
@@ -125,9 +125,9 @@ func getPromptTokens(textRequest *relaymodel.GeneralOpenAIRequest, relayMode int
 }
 
 func getPreConsumedQuota(textRequest *relaymodel.GeneralOpenAIRequest, promptTokens int, ratio float64) int64 {
-	preConsumedTokens := config.PreConsumedQuota
+	preConsumedTokens := config.PreConsumedQuota + int64(promptTokens)
 	if textRequest.MaxTokens != 0 {
-		preConsumedTokens = int64(promptTokens) + int64(textRequest.MaxTokens)
+		preConsumedTokens += int64(textRequest.MaxTokens)
 	}
 	return int64(float64(preConsumedTokens) * ratio)
 }
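Note on the hunk above: the pre-charge now always includes the prompt tokens. Before, a request without max_tokens pre-consumed only config.PreConsumedQuota regardless of prompt size, and a request with max_tokens ignored the configured floor. A short comparison of the two formulas on invented numbers:

    package main

    import "fmt"

    func oldPreConsumed(preConsumedQuota, promptTokens, maxTokens int64, ratio float64) int64 {
        preConsumedTokens := preConsumedQuota
        if maxTokens != 0 {
            preConsumedTokens = promptTokens + maxTokens
        }
        return int64(float64(preConsumedTokens) * ratio)
    }

    func newPreConsumed(preConsumedQuota, promptTokens, maxTokens int64, ratio float64) int64 {
        preConsumedTokens := preConsumedQuota + promptTokens
        if maxTokens != 0 {
            preConsumedTokens += maxTokens
        }
        return int64(float64(preConsumedTokens) * ratio)
    }

    func main() {
        // invented numbers: 500-token floor, 1200-token prompt, ratio 1.5
        fmt.Println(oldPreConsumed(500, 1200, 0, 1.5), newPreConsumed(500, 1200, 0, 1.5))     // 750 2550
        fmt.Println(oldPreConsumed(500, 1200, 300, 1.5), newPreConsumed(500, 1200, 300, 1.5)) // 2250 3000
    }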
@@ -208,6 +208,9 @@ func getMappedModelName(modelName string, mapping map[string]string) (string, bo
 
 func isErrorHappened(meta *meta.Meta, resp *http.Response) bool {
 	if resp == nil {
+		if meta.ChannelType == channeltype.AwsClaude {
+			return false
+		}
 		return true
 	}
 	if resp.StatusCode != http.StatusOK {
@@ -143,6 +143,12 @@ export const CHANNEL_OPTIONS = {
     value: 38,
     color: 'primary'
   },
+  39: {
+    key: 39,
+    text: 'together.ai',
+    value: 39,
+    color: 'primary'
+  },
   8: {
     key: 8,
     text: '自定义渠道',
@@ -24,6 +24,7 @@ export const CHANNEL_OPTIONS = [
   {key: 36, text: 'DeepSeek', value: 36, color: 'black'},
   {key: 37, text: 'Cloudflare', value: 37, color: 'orange'},
   {key: 38, text: 'DeepL', value: 38, color: 'black'},
+  {key: 39, text: 'together.ai', value: 39, color: 'blue'},
   {key: 8, text: '自定义渠道', value: 8, color: 'pink'},
   {key: 22, text: '知识库:FastGPT', value: 22, color: 'blue'},
   {key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple'},