Merge remote-tracking branch 'origin/upstream/main'

Laisky.Cai
2024-02-19 07:23:30 +00:00
71 changed files with 2003 additions and 2097 deletions

View File

@@ -63,6 +63,7 @@ const (
ChannelTypeFastGPT = 22
ChannelTypeTencent = 23
ChannelTypeGemini = 24
ChannelTypeMoonshot = 25
)
var ChannelBaseURLs = []string{
@@ -91,4 +92,13 @@ var ChannelBaseURLs = []string{
"https://fastgpt.run/api/openapi", // 22
"https://hunyuan.cloud.tencent.com", // 23
"https://generativelanguage.googleapis.com", // 24
"https://api.moonshot.cn", // 25
}
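// Illustrative note: the slice index doubles as the channel type constant, so
// ChannelBaseURLs[ChannelTypeMoonshot] yields "https://api.moonshot.cn".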
const (
ConfigKeyPrefix = "cfg_"
ConfigKeyAPIVersion = ConfigKeyPrefix + "api_version"
ConfigKeyLibraryID = ConfigKeyPrefix + "library_id"
ConfigKeyPlugin = ConfigKeyPrefix + "plugin"
)
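// A minimal usage sketch (assuming a *gin.Context named c, as populated by the
// Distribute middleware): values stored under these keys are read back with
// c.GetString, e.g. apiVersion := c.GetString(ConfigKeyAPIVersion).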

View File

@@ -30,6 +30,12 @@ var DalleImagePromptLengthLimitations = map[string]int{
"dall-e-3": 4000,
}
const (
USD2RMB = 7
USD = 500 // $0.002 = 1 -> $1 = 500
RMB = USD / USD2RMB
)
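// Worked example: RMB = 500 / 7 ≈ 71.43, so a model billed at ¥0.12 / 1k tokens
// gets a ratio of 0.12 * RMB ≈ 8.571, matching the 8.572 previously hardcoded
// for ERNIE-Bot-4.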
// ModelRatio
// https://platform.openai.com/docs/models/model-endpoint-compatibility
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
@@ -38,57 +44,60 @@ var DalleImagePromptLengthLimitations = map[string]int{
// 1 === $0.002 / 1K tokens
// 1 === ¥0.014 / 1k tokens
var ModelRatio = map[string]float64{
"gpt-4": 15,
"gpt-4-0314": 15,
"gpt-4-0613": 15,
"gpt-4-32k": 30,
"gpt-4-32k-0314": 30,
"gpt-4-32k-0613": 30,
"gpt-4-1106-preview": 5, // $0.01 / 1K tokens
"gpt-4-0125-preview": 5, // $0.01 / 1K tokens
"gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
"gpt-3.5-turbo": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-0301": 0.75,
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
"gpt-3.5-turbo-0125": 0.25, // $0.0005 / 1K tokens
"davinci-002": 1, // $0.002 / 1K tokens
"babbage-002": 0.2, // $0.0004 / 1K tokens
"text-ada-001": 0.2,
"text-babbage-001": 0.25,
"text-curie-001": 1,
"text-davinci-002": 10,
"text-davinci-003": 10,
"text-davinci-edit-001": 10,
"code-davinci-edit-001": 10,
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
"tts-1": 7.5, // $0.015 / 1K characters
"tts-1-1106": 7.5,
"tts-1-hd": 15, // $0.030 / 1K characters
"tts-1-hd-1106": 15,
"davinci": 10,
"curie": 10,
"babbage": 10,
"ada": 10,
"text-embedding-ada-002": 0.05,
"text-embedding-3-small": 0.01,
"text-embedding-3-large": 0.065,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"dall-e-2": 8, // $0.016 - $0.020 / image
"dall-e-3": 20, // $0.040 - $0.120 / image
"claude-instant-1": 0.815, // $1.63 / 1M tokens
"claude-2": 5.51, // $11.02 / 1M tokens
"claude-2.0": 5.51, // $11.02 / 1M tokens
"claude-2.1": 5.51, // $11.02 / 1M tokens
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
// https://openai.com/pricing
"gpt-4": 15,
"gpt-4-0314": 15,
"gpt-4-0613": 15,
"gpt-4-32k": 30,
"gpt-4-32k-0314": 30,
"gpt-4-32k-0613": 30,
"gpt-4-1106-preview": 5, // $0.01 / 1K tokens
"gpt-4-0125-preview": 5, // $0.01 / 1K tokens
"gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
"gpt-3.5-turbo": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-0301": 0.75,
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
"gpt-3.5-turbo-0125": 0.25, // $0.0005 / 1K tokens
"davinci-002": 1, // $0.002 / 1K tokens
"babbage-002": 0.2, // $0.0004 / 1K tokens
"text-ada-001": 0.2,
"text-babbage-001": 0.25,
"text-curie-001": 1,
"text-davinci-002": 10,
"text-davinci-003": 10,
"text-davinci-edit-001": 10,
"code-davinci-edit-001": 10,
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
"tts-1": 7.5, // $0.015 / 1K characters
"tts-1-1106": 7.5,
"tts-1-hd": 15, // $0.030 / 1K characters
"tts-1-hd-1106": 15,
"davinci": 10,
"curie": 10,
"babbage": 10,
"ada": 10,
"text-embedding-ada-002": 0.05,
"text-embedding-3-small": 0.01,
"text-embedding-3-large": 0.065,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"dall-e-2": 8, // $0.016 - $0.020 / image
"dall-e-3": 20, // $0.040 - $0.120 / image
"claude-instant-1": 0.815, // $1.63 / 1M tokens
"claude-2": 5.51, // $11.02 / 1M tokens
"claude-2.0": 5.51, // $11.02 / 1M tokens
"claude-2.1": 5.51, // $11.02 / 1M tokens
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
"ERNIE-Bot-4": 0.12 * RMB, // ¥0.12 / 1k tokens
"ERNIE-Bot-8k": 0.024 * RMB,
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
@@ -103,11 +112,21 @@ var ModelRatio = map[string]float64{
"qwen-max-longcontext": 1.4286, // ¥0.02 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"ChatStd": 0.01 * RMB,
"ChatPro": 0.1 * RMB,
// https://platform.moonshot.cn/pricing
"moonshot-v1-8k": 0.012 * RMB,
"moonshot-v1-32k": 0.024 * RMB,
"moonshot-v1-128k": 0.06 * RMB,
}
func ModelRatio2JSONString() string {

View File

@@ -4,7 +4,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
relaymodel "github.com/songquanpeng/one-api/relay/model"
)
func GetSubscription(c *gin.Context) {
@@ -30,7 +30,7 @@ func GetSubscription(c *gin.Context) {
expiredTime = 0
}
if err != nil {
Error := openai.Error{
Error := relaymodel.Error{
Message: err.Error(),
Type: "upstream_error",
}
@@ -72,7 +72,7 @@ func GetUsage(c *gin.Context) {
quota, err = model.GetUserUsedQuota(userId)
}
if err != nil {
Error := openai.Error{
Error := relaymodel.Error{
Message: err.Error(),
Type: "one_api_error",
}

View File

@@ -9,10 +9,14 @@ import (
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/helper"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"sync"
"time"
@@ -20,87 +24,13 @@ import (
"github.com/gin-gonic/gin"
)
func testChannel(channel *model.Channel, request openai.ChatRequest) (err error, openaiErr *openai.Error) {
switch channel.Type {
case common.ChannelTypePaLM:
fallthrough
case common.ChannelTypeGemini:
fallthrough
case common.ChannelTypeAnthropic:
fallthrough
case common.ChannelTypeBaidu:
fallthrough
case common.ChannelTypeZhipu:
fallthrough
case common.ChannelTypeAli:
fallthrough
case common.ChannelType360:
fallthrough
case common.ChannelTypeXunfei:
return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil
case common.ChannelTypeAzure:
request.Model = "gpt-35-turbo"
defer func() {
if err != nil {
err = errors.New("请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!")
}
}()
default:
request.Model = "gpt-3.5-turbo"
}
requestURL := common.ChannelBaseURLs[channel.Type]
if channel.Type == common.ChannelTypeAzure {
requestURL = util.GetFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
} else {
if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
requestURL = baseURL
}
requestURL = util.GetFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
}
jsonData, err := json.Marshal(request)
if err != nil {
return err, nil
}
req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(jsonData))
if err != nil {
return err, nil
}
if channel.Type == common.ChannelTypeAzure {
req.Header.Set("api-key", channel.Key)
} else {
req.Header.Set("Authorization", "Bearer "+channel.Key)
}
req.Header.Set("Content-Type", "application/json")
resp, err := util.HTTPClient.Do(req)
if err != nil {
return err, nil
}
defer resp.Body.Close()
var response openai.SlimTextResponse
body, err := io.ReadAll(resp.Body)
if err != nil {
return err, nil
}
err = json.Unmarshal(body, &response)
if err != nil {
return fmt.Errorf("Error: %s\nResp body: %s", err, body), nil
}
if response.Usage.CompletionTokens == 0 {
if response.Error.Message == "" {
response.Error.Message = "补全 tokens 非预期返回 0"
}
return fmt.Errorf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message), &response.Error
}
return nil, nil
}
func buildTestRequest() *openai.ChatRequest {
testRequest := &openai.ChatRequest{
Model: "", // this will be set later
func buildTestRequest() *relaymodel.GeneralOpenAIRequest {
testRequest := &relaymodel.GeneralOpenAIRequest{
MaxTokens: 1,
Stream: false,
Model: "gpt-3.5-turbo",
}
testMessage := openai.Message{
testMessage := relaymodel.Message{
Role: "user",
Content: "hi",
}
@@ -108,6 +38,65 @@ func buildTestRequest() *openai.ChatRequest {
return testRequest
}
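// For reference, the test request above marshals to roughly the following JSON
// (exact fields depend on the GeneralOpenAIRequest struct tags; Model is
// overwritten per channel by the caller):
// {"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"hi"}],"max_tokens":1}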
func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error) {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = &http.Request{
Method: "POST",
URL: &url.URL{Path: "/v1/chat/completions"},
Body: nil,
Header: make(http.Header),
}
c.Request.Header.Set("Authorization", "Bearer "+channel.Key)
c.Request.Header.Set("Content-Type", "application/json")
c.Set("channel", channel.Type)
c.Set("base_url", channel.GetBaseURL())
meta := util.GetRelayMeta(c)
apiType := constant.ChannelType2APIType(channel.Type)
adaptor := helper.GetAdaptor(apiType)
if adaptor == nil {
return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
}
adaptor.Init(meta)
modelName := adaptor.GetModelList()[0]
request := buildTestRequest()
request.Model = modelName
meta.OriginModelName, meta.ActualModelName = modelName, modelName
convertedRequest, err := adaptor.ConvertRequest(c, constant.RelayModeChatCompletions, request)
if err != nil {
return err, nil
}
jsonData, err := json.Marshal(convertedRequest)
if err != nil {
return err, nil
}
requestBody := bytes.NewBuffer(jsonData)
c.Request.Body = io.NopCloser(requestBody)
resp, err := adaptor.DoRequest(c, meta, requestBody)
if err != nil {
return err, nil
}
if resp.StatusCode != http.StatusOK {
err := util.RelayErrorHandler(resp)
return fmt.Errorf("status code %d: %s", resp.StatusCode, err.Error.Message), &err.Error
}
usage, respErr := adaptor.DoResponse(c, resp, meta)
if respErr != nil {
return fmt.Errorf("%s", respErr.Error.Message), &respErr.Error
}
if usage == nil {
return errors.New("usage is nil"), nil
}
result := w.Result()
// log the recorded response body for debugging
respBody, err := io.ReadAll(result.Body)
if err != nil {
return err, nil
}
logger.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
return nil, nil
}
func TestChannel(c *gin.Context) {
id, err := strconv.Atoi(c.Param("id"))
if err != nil {
@@ -125,9 +114,8 @@ func TestChannel(c *gin.Context) {
})
return
}
testRequest := buildTestRequest()
tik := time.Now()
err, _ = testChannel(channel, *testRequest)
err, _ = testChannel(channel)
tok := time.Now()
milliseconds := tok.Sub(tik).Milliseconds()
go channel.UpdateResponseTime(milliseconds)
@@ -192,7 +180,6 @@ func testAllChannels(notify bool) error {
if err != nil {
return err
}
testRequest := buildTestRequest()
var disableThreshold = int64(config.ChannelDisableThreshold * 1000)
if disableThreshold == 0 {
disableThreshold = 10000000 // an impossible value
@@ -201,7 +188,7 @@ func testAllChannels(notify bool) error {
for _, channel := range channels {
isChannelEnabled := channel.Status == common.ChannelStatusEnabled
tik := time.Now()
err, openaiErr := testChannel(channel, *testRequest)
err, openaiErr := testChannel(channel)
tok := time.Now()
milliseconds := tok.Sub(tik).Milliseconds()
if isChannelEnabled && milliseconds > disableThreshold {

View File

@@ -3,7 +3,11 @@ package controller
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/channel/ai360"
"github.com/songquanpeng/one-api/relay/channel/moonshot"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/helper"
relaymodel "github.com/songquanpeng/one-api/relay/model"
)
// https://platform.openai.com/docs/api-reference/models/list
@@ -53,592 +57,43 @@ func init() {
IsBlocking: false,
})
// https://platform.openai.com/docs/models/model-endpoint-compatibility
openAIModels = []OpenAIModels{
{
Id: "dall-e-2",
for i := 0; i < constant.APITypeDummy; i++ {
adaptor := helper.GetAdaptor(i)
channelName := adaptor.GetChannelName()
modelNames := adaptor.GetModelList()
for _, modelName := range modelNames {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: channelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
}
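// Channels without a dedicated adaptor (ai360, moonshot) are appended by hand
// below; every entry ultimately surfaces through /v1/models as, for example
// (field names assumed to follow the usual OpenAI schema):
// {"id":"moonshot-v1-8k","object":"model","created":1626777600,"owned_by":"moonshot"}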
for _, modelName := range ai360.ModelList {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelName,
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-2",
Parent: nil,
},
{
Id: "dall-e-3",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "dall-e-3",
Parent: nil,
},
{
Id: "whisper-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "whisper-1",
Parent: nil,
},
{
Id: "tts-1",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1",
Parent: nil,
},
{
Id: "tts-1-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-1106",
Parent: nil,
},
{
Id: "tts-1-hd",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd",
Parent: nil,
},
{
Id: "tts-1-hd-1106",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "tts-1-hd-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0301",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0301",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-1106",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-1106",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-0125",
Object: "model",
Created: 1706232090,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0125",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-instruct",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-instruct",
Parent: nil,
},
{
Id: "gpt-4",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4",
Parent: nil,
},
{
Id: "gpt-4-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0314",
Parent: nil,
},
{
Id: "gpt-4-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0613",
Parent: nil,
},
{
Id: "gpt-4-32k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k",
Parent: nil,
},
{
Id: "gpt-4-32k-0314",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0314",
Parent: nil,
},
{
Id: "gpt-4-32k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0613",
Parent: nil,
},
{
Id: "gpt-4-1106-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-1106-preview",
Parent: nil,
},
{
Id: "gpt-4-0125-preview",
Object: "model",
Created: 1706232090,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0125-preview",
Parent: nil,
},
{
Id: "gpt-4-turbo-preview",
Object: "model",
Created: 1706232090,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-turbo-preview",
Parent: nil,
},
{
Id: "gpt-4-vision-preview",
Object: "model",
Created: 1699593571,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-vision-preview",
Parent: nil,
},
{
Id: "text-embedding-ada-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-embedding-ada-002",
Parent: nil,
},
{
Id: "text-embedding-3-small",
Object: "model",
Created: 1706232090,
OwnedBy: "openai",
Permission: permission,
Root: "text-embedding-3-small",
Parent: nil,
},
{
Id: "text-embedding-3-large",
Object: "model",
Created: 1706232090,
OwnedBy: "openai",
Permission: permission,
Root: "text-embedding-3-large",
Parent: nil,
},
{
Id: "text-davinci-003",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-003",
Parent: nil,
},
{
Id: "text-davinci-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-002",
Parent: nil,
},
{
Id: "text-curie-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-curie-001",
Parent: nil,
},
{
Id: "text-babbage-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-babbage-001",
Parent: nil,
},
{
Id: "text-ada-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-ada-001",
Parent: nil,
},
{
Id: "text-moderation-latest",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-latest",
Parent: nil,
},
{
Id: "text-moderation-stable",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-stable",
Parent: nil,
},
{
Id: "text-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-davinci-edit-001",
Parent: nil,
},
{
Id: "code-davinci-edit-001",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "code-davinci-edit-001",
Parent: nil,
},
{
Id: "davinci-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "davinci-002",
Parent: nil,
},
{
Id: "babbage-002",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "babbage-002",
Parent: nil,
},
{
Id: "claude-instant-1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-instant-1",
Parent: nil,
},
{
Id: "claude-2",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2",
Parent: nil,
},
{
Id: "claude-2.1",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.1",
Parent: nil,
},
{
Id: "claude-2.0",
Object: "model",
Created: 1677649963,
OwnedBy: "anthropic",
Permission: permission,
Root: "claude-2.0",
Parent: nil,
},
{
Id: "ERNIE-Bot",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot",
Parent: nil,
},
{
Id: "ERNIE-Bot-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-turbo",
Parent: nil,
},
{
Id: "ERNIE-Bot-4",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "ERNIE-Bot-4",
Parent: nil,
},
{
Id: "Embedding-V1",
Object: "model",
Created: 1677649963,
OwnedBy: "baidu",
Permission: permission,
Root: "Embedding-V1",
Parent: nil,
},
{
Id: "PaLM-2",
Object: "model",
Created: 1677649963,
OwnedBy: "google palm",
Permission: permission,
Root: "PaLM-2",
Parent: nil,
},
{
Id: "gemini-pro",
Object: "model",
Created: 1677649963,
OwnedBy: "google gemini",
Permission: permission,
Root: "gemini-pro",
Parent: nil,
},
{
Id: "gemini-pro-vision",
Object: "model",
Created: 1677649963,
OwnedBy: "google gemini",
Permission: permission,
Root: "gemini-pro-vision",
Parent: nil,
},
{
Id: "chatglm_turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_turbo",
Parent: nil,
},
{
Id: "chatglm_pro",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_pro",
Parent: nil,
},
{
Id: "chatglm_std",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_std",
Parent: nil,
},
{
Id: "chatglm_lite",
Object: "model",
Created: 1677649963,
OwnedBy: "zhipu",
Permission: permission,
Root: "chatglm_lite",
Parent: nil,
},
{
Id: "qwen-turbo",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-turbo",
Parent: nil,
},
{
Id: "qwen-plus",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-plus",
Parent: nil,
},
{
Id: "qwen-max",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max",
Parent: nil,
},
{
Id: "qwen-max-longcontext",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "qwen-max-longcontext",
Parent: nil,
},
{
Id: "text-embedding-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "ali",
Permission: permission,
Root: "text-embedding-v1",
Parent: nil,
},
{
Id: "SparkDesk",
Object: "model",
Created: 1677649963,
OwnedBy: "xunfei",
Permission: permission,
Root: "SparkDesk",
Parent: nil,
},
{
Id: "360GPT_S2_V9",
Object: "model",
Created: 1677649963,
Created: 1626777600,
OwnedBy: "360",
Permission: permission,
Root: "360GPT_S2_V9",
Root: modelName,
Parent: nil,
},
{
Id: "embedding-bert-512-v1",
})
}
for _, modelName := range moonshot.ModelList {
openAIModels = append(openAIModels, OpenAIModels{
Id: modelName,
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Created: 1626777600,
OwnedBy: "moonshot",
Permission: permission,
Root: "embedding-bert-512-v1",
Root: modelName,
Parent: nil,
},
{
Id: "embedding_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding_s1_v1",
Parent: nil,
},
{
Id: "semantic_similarity_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "semantic_similarity_s1_v1",
Parent: nil,
},
{
Id: "hunyuan",
Object: "model",
Created: 1677649963,
OwnedBy: "tencent",
Permission: permission,
Root: "hunyuan",
Parent: nil,
},
})
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {
@@ -658,7 +113,7 @@ func RetrieveModel(c *gin.Context) {
if model, ok := openAIModelsMap[modelId]; ok {
c.JSON(200, model)
} else {
Error := openai.Error{
Error := relaymodel.Error{
Message: fmt.Sprintf("The model '%s' does not exist", modelId),
Type: "invalid_request_error",
Param: "model",

View File

@@ -6,9 +6,9 @@ import (
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/controller"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"net/http"
"strconv"
@@ -18,7 +18,7 @@ import (
func Relay(c *gin.Context) {
relayMode := constant.Path2RelayMode(c.Request.URL.Path)
var err *openai.ErrorWithStatusCode
var err *model.ErrorWithStatusCode
switch relayMode {
case constant.RelayModeImagesGenerations:
err = controller.RelayImageHelper(c, relayMode)
@@ -61,7 +61,7 @@ func Relay(c *gin.Context) {
}
func RelayNotImplemented(c *gin.Context) {
err := openai.Error{
err := model.Error{
Message: "API not implemented",
Type: "one_api_error",
Param: "",
@@ -73,7 +73,7 @@ func RelayNotImplemented(c *gin.Context) {
}
func RelayNotFound(c *gin.Context) {
err := openai.Error{
err := model.Error{
Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
Type: "invalid_request_error",
Param: "",

View File

@@ -83,17 +83,22 @@ func Distribute() func(c *gin.Context) {
c.Set("model_mapping", channel.GetModelMapping())
c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
c.Set("base_url", channel.GetBaseURL())
// this is for backward compatibility
switch channel.Type {
case common.ChannelTypeAzure:
c.Set("api_version", channel.Other)
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeXunfei:
c.Set("api_version", channel.Other)
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeGemini:
c.Set("api_version", channel.Other)
c.Set(common.ConfigKeyAPIVersion, channel.Other)
case common.ChannelTypeAIProxyLibrary:
c.Set("library_id", channel.Other)
c.Set(common.ConfigKeyLibraryID, channel.Other)
case common.ChannelTypeAli:
c.Set("plugin", channel.Other)
c.Set(common.ConfigKeyPlugin, channel.Other)
}
cfg, _ := channel.LoadConfig()
for k, v := range cfg {
c.Set(common.ConfigKeyPrefix+k, v)
}
c.Next()
}

View File

@@ -21,7 +21,7 @@ type Channel struct {
TestTime int64 `json:"test_time" gorm:"bigint"`
ResponseTime int `json:"response_time"` // in milliseconds
BaseURL *string `json:"base_url" gorm:"column:base_url;default:''"`
Other string `json:"other"`
Other string `json:"other"` // DEPRECATED: please save config to field Config
Balance float64 `json:"balance"` // in USD
BalanceUpdatedTime int64 `json:"balance_updated_time" gorm:"bigint"`
Models string `json:"models"`
@@ -29,6 +29,7 @@ type Channel struct {
UsedQuota int64 `json:"used_quota" gorm:"bigint;default:0"`
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
Config string `json:"config"`
}
func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
@@ -155,6 +156,18 @@ func (channel *Channel) Delete() error {
return err
}
func (channel *Channel) LoadConfig() (map[string]string, error) {
if channel.Config == "" {
return nil, nil
}
cfg := make(map[string]string)
err := json.Unmarshal([]byte(channel.Config), &cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
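// Illustrative sketch: Config is expected to hold a flat JSON string map, e.g.
// {"api_version": "2023-03-15-preview", "plugin": "example-plugin-id"}
// (values here are placeholders); the Distribute middleware then re-exposes
// each key on the gin context under the cfg_ prefix.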
func UpdateChannelStatusById(id int, status int) {
err := UpdateAbilityStatus(id, status == common.ChannelStatusEnabled)
if err != nil {

View File

@@ -0,0 +1,8 @@
package ai360
var ModelList = []string{
"360GPT_S2_V9",
"embedding-bert-512-v1",
"embedding_s1_v1",
"semantic_similarity_s1_v1",
}

View File

@@ -1,22 +1,60 @@
package aiproxy
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Auth(c *gin.Context) error {
func (a *Adaptor) Init(meta *util.RelayMeta) {
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
return fmt.Sprintf("%s/api/library/ask", meta.BaseURL), nil
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channel.SetupCommonRequestHeader(c, req, meta)
req.Header.Set("Authorization", "Bearer "+meta.APIKey)
return nil
}
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
aiProxyLibraryRequest := ConvertRequest(*request)
aiProxyLibraryRequest.LibraryId = c.GetString(common.ConfigKeyLibraryID)
return aiProxyLibraryRequest, nil
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channel.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
err, usage = StreamHandler(c, resp)
} else {
err, usage = Handler(c, resp)
}
return
}
func (a *Adaptor) GetModelList() []string {
return ModelList
}
func (a *Adaptor) GetChannelName() string {
return "aiproxy"
}

View File

@@ -0,0 +1,9 @@
package aiproxy
import "github.com/songquanpeng/one-api/relay/channel/openai"
var ModelList = []string{""}
func init() {
ModelList = openai.ModelList
}

View File

@@ -15,11 +15,12 @@ import (
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
)
// https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答
func ConvertRequest(request openai.GeneralOpenAIRequest) *LibraryRequest {
func ConvertRequest(request model.GeneralOpenAIRequest) *LibraryRequest {
query := ""
if len(request.Messages) != 0 {
query = request.Messages[len(request.Messages)-1].StringContent()
@@ -46,7 +47,7 @@ func responseAIProxyLibrary2OpenAI(response *LibraryResponse) *openai.TextRespon
content := response.Answer + aiProxyDocuments2Markdown(response.Documents)
choice := openai.TextResponseChoice{
Index: 0,
Message: openai.Message{
Message: model.Message{
Role: "assistant",
Content: content,
},
@@ -86,8 +87,8 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
}
}
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
var usage openai.Usage
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
var usage model.Usage
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
@@ -158,7 +159,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
return nil, &usage
}
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
var AIProxyLibraryResponse LibraryResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
@@ -173,8 +174,8 @@ func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode,
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if AIProxyLibraryResponse.ErrCode != 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &model.ErrorWithStatusCode{
Error: model.Error{
Message: AIProxyLibraryResponse.Message,
Type: strconv.Itoa(AIProxyLibraryResponse.ErrCode),
Code: AIProxyLibraryResponse.ErrCode,

View File

@@ -1,22 +1,83 @@
package ali
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"net/http"
)
// import (
// "errors"
// "fmt"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/common"
// "github.com/songquanpeng/one-api/relay/channel"
// "github.com/songquanpeng/one-api/relay/constant"
// "github.com/songquanpeng/one-api/relay/model"
// "github.com/songquanpeng/one-api/relay/util"
// "io"
// "net/http"
// )
type Adaptor struct {
}
// // https://help.aliyun.com/zh/dashscope/developer-reference/api-details
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
// type Adaptor struct {
// }
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
// func (a *Adaptor) Init(meta *util.RelayMeta) {
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}
// }
// func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
// fullRequestURL := fmt.Sprintf("%s/api/v1/services/aigc/text-generation/generation", meta.BaseURL)
// if meta.Mode == constant.RelayModeEmbeddings {
// fullRequestURL = fmt.Sprintf("%s/api/v1/services/embeddings/text-embedding/text-embedding", meta.BaseURL)
// }
// return fullRequestURL, nil
// }
// func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
// channel.SetupCommonRequestHeader(c, req, meta)
// req.Header.Set("Authorization", "Bearer "+meta.APIKey)
// if meta.IsStream {
// req.Header.Set("X-DashScope-SSE", "enable")
// }
// if c.GetString(common.ConfigKeyPlugin) != "" {
// req.Header.Set("X-DashScope-Plugin", c.GetString(common.ConfigKeyPlugin))
// }
// return nil
// }
// func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
// if request == nil {
// return nil, errors.New("request is nil")
// }
// switch relayMode {
// case constant.RelayModeEmbeddings:
// baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
// return baiduEmbeddingRequest, nil
// default:
// baiduRequest := ConvertRequest(*request)
// return baiduRequest, nil
// }
// }
// func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
// return channel.DoRequestHelper(a, c, meta, requestBody)
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
// if meta.IsStream {
// err, usage = StreamHandler(c, resp)
// } else {
// switch meta.Mode {
// case constant.RelayModeEmbeddings:
// err, usage = EmbeddingHandler(c, resp)
// default:
// err, usage = Handler(c, resp)
// }
// }
// return
// }
// func (a *Adaptor) GetModelList() []string {
// return ModelList
// }
// func (a *Adaptor) GetChannelName() string {
// return "ali"
// }

View File

@@ -0,0 +1,6 @@
package ali
var ModelList = []string{
"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext",
"text-embedding-v1",
}

View File

@@ -1,13 +1,7 @@
package ali
// import (
// "bufio"
// "encoding/json"
// "github.com/gin-gonic/gin"
// "io"
// "net/http"
// "one-api/common"
// "strings"
// "github.com/songquanpeng/one-api/common"
// )
// // https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r

View File

@@ -1,71 +1,71 @@
package ali
type Message struct {
Content string `json:"content"`
Role string `json:"role"`
}
// type Message struct {
// Content string `json:"content"`
// Role string `json:"role"`
// }
type Input struct {
//Prompt string `json:"prompt"`
Messages []Message `json:"messages"`
}
// type Input struct {
// //Prompt string `json:"prompt"`
// Messages []Message `json:"messages"`
// }
type Parameters struct {
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
Seed uint64 `json:"seed,omitempty"`
EnableSearch bool `json:"enable_search,omitempty"`
IncrementalOutput bool `json:"incremental_output,omitempty"`
}
// type Parameters struct {
// TopP float64 `json:"top_p,omitempty"`
// TopK int `json:"top_k,omitempty"`
// Seed uint64 `json:"seed,omitempty"`
// EnableSearch bool `json:"enable_search,omitempty"`
// IncrementalOutput bool `json:"incremental_output,omitempty"`
// }
type ChatRequest struct {
Model string `json:"model"`
Input Input `json:"input"`
Parameters Parameters `json:"parameters,omitempty"`
}
// type ChatRequest struct {
// Model string `json:"model"`
// Input Input `json:"input"`
// Parameters Parameters `json:"parameters,omitempty"`
// }
type EmbeddingRequest struct {
Model string `json:"model"`
Input struct {
Texts []string `json:"texts"`
} `json:"input"`
Parameters *struct {
TextType string `json:"text_type,omitempty"`
} `json:"parameters,omitempty"`
}
// type EmbeddingRequest struct {
// Model string `json:"model"`
// Input struct {
// Texts []string `json:"texts"`
// } `json:"input"`
// Parameters *struct {
// TextType string `json:"text_type,omitempty"`
// } `json:"parameters,omitempty"`
// }
type Embedding struct {
Embedding []float64 `json:"embedding"`
TextIndex int `json:"text_index"`
}
// type Embedding struct {
// Embedding []float64 `json:"embedding"`
// TextIndex int `json:"text_index"`
// }
type EmbeddingResponse struct {
Output struct {
Embeddings []Embedding `json:"embeddings"`
} `json:"output"`
Usage Usage `json:"usage"`
Error
}
// type EmbeddingResponse struct {
// Output struct {
// Embeddings []Embedding `json:"embeddings"`
// } `json:"output"`
// Usage Usage `json:"usage"`
// Error
// }
type Error struct {
Code string `json:"code"`
Message string `json:"message"`
RequestId string `json:"request_id"`
}
// type Error struct {
// Code string `json:"code"`
// Message string `json:"message"`
// RequestId string `json:"request_id"`
// }
type Usage struct {
InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"`
TotalTokens int `json:"total_tokens"`
}
// type Usage struct {
// InputTokens int `json:"input_tokens"`
// OutputTokens int `json:"output_tokens"`
// TotalTokens int `json:"total_tokens"`
// }
type Output struct {
Text string `json:"text"`
FinishReason string `json:"finish_reason"`
}
// type Output struct {
// Text string `json:"text"`
// FinishReason string `json:"finish_reason"`
// }
type ChatResponse struct {
Output Output `json:"output"`
Usage Usage `json:"usage"`
Error
}
// type ChatResponse struct {
// Output Output `json:"output"`
// Usage Usage `json:"usage"`
// Error
// }

View File

@@ -1,22 +1,65 @@
package anthropic
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Auth(c *gin.Context) error {
func (a *Adaptor) Init(meta *util.RelayMeta) {
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
return fmt.Sprintf("%s/v1/complete", meta.BaseURL), nil
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channel.SetupCommonRequestHeader(c, req, meta)
req.Header.Set("x-api-key", meta.APIKey)
anthropicVersion := c.Request.Header.Get("anthropic-version")
if anthropicVersion == "" {
anthropicVersion = "2023-06-01"
}
req.Header.Set("anthropic-version", anthropicVersion)
return nil
}
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return ConvertRequest(*request), nil
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channel.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
var responseText string
err, responseText = StreamHandler(c, resp)
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
} else {
err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
}
return
}
func (a *Adaptor) GetModelList() []string {
return ModelList
}
func (a *Adaptor) GetChannelName() string {
return "authropic"
}

View File

@@ -0,0 +1,5 @@
package anthropic
var ModelList = []string{
"claude-instant-1", "claude-2", "claude-2.0", "claude-2.1",
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strings"
@@ -25,7 +26,7 @@ func stopReasonClaude2OpenAI(reason string) string {
}
}
func ConvertRequest(textRequest openai.GeneralOpenAIRequest) *Request {
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
claudeRequest := Request{
Model: textRequest.Model,
Prompt: "",
@@ -78,7 +79,7 @@ func streamResponseClaude2OpenAI(claudeResponse *Response) *openai.ChatCompletio
func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
choice := openai.TextResponseChoice{
Index: 0,
Message: openai.Message{
Message: model.Message{
Role: "assistant",
Content: strings.TrimPrefix(claudeResponse.Completion, " "),
Name: nil,
@@ -94,7 +95,7 @@ func responseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
return &fullTextResponse
}
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
responseText := ""
responseId := fmt.Sprintf("chatcmpl-%s", helper.GetUUID())
createdTime := helper.GetTimestamp()
@@ -159,7 +160,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
return nil, responseText
}
func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -174,8 +175,8 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if claudeResponse.Error.Type != "" {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &model.ErrorWithStatusCode{
Error: model.Error{
Message: claudeResponse.Error.Message,
Type: claudeResponse.Error.Type,
Param: "",
@@ -185,9 +186,9 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
}, nil
}
fullTextResponse := responseClaude2OpenAI(&claudeResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(claudeResponse.Completion, model)
usage := openai.Usage{
fullTextResponse.Model = modelName
completionTokens := openai.CountTokenText(claudeResponse.Completion, modelName)
usage := model.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,

View File

@@ -1,22 +1,93 @@
package baidu
// import (
// "errors"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/relay/channel/openai"
// "github.com/songquanpeng/one-api/relay/channel"
// "github.com/songquanpeng/one-api/relay/constant"
// "github.com/songquanpeng/one-api/relay/model"
// "github.com/songquanpeng/one-api/relay/util"
// "io"
// "net/http"
// )
// type Adaptor struct {
// // type Adaptor struct {
// // }
// func (a *Adaptor) Init(meta *util.RelayMeta) {
// }
// func (a *Adaptor) Auth(c *gin.Context) error {
// func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
// // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/clntwmv7t
// var fullRequestURL string
// switch meta.ActualModelName {
// case "ERNIE-Bot-4":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
// case "ERNIE-Bot-8K":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_bot_8k"
// case "ERNIE-Bot":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
// case "ERNIE-Speed":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed"
// case "ERNIE-Bot-turbo":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
// case "BLOOMZ-7B":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1"
// case "Embedding-V1":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"
// }
// var accessToken string
// var err error
// if accessToken, err = GetAccessToken(meta.APIKey); err != nil {
// return "", err
// }
// fullRequestURL += "?access_token=" + accessToken
// return fullRequestURL, nil
// }
// func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
// channel.SetupCommonRequestHeader(c, req, meta)
// req.Header.Set("Authorization", "Bearer "+meta.APIKey)
// return nil
// }
// func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
// return nil, nil
// func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
// if request == nil {
// return nil, errors.New("request is nil")
// }
// switch relayMode {
// case constant.RelayModeEmbeddings:
// baiduEmbeddingRequest := ConvertEmbeddingRequest(*request)
// return baiduEmbeddingRequest, nil
// default:
// baiduRequest := ConvertRequest(*request)
// return baiduRequest, nil
// }
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
// return nil, nil, nil
// func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
// return channel.DoRequestHelper(a, c, meta, requestBody)
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
// if meta.IsStream {
// err, usage = StreamHandler(c, resp)
// } else {
// switch meta.Mode {
// case constant.RelayModeEmbeddings:
// err, usage = EmbeddingHandler(c, resp)
// default:
// err, usage = Handler(c, resp)
// }
// }
// return
// }
// func (a *Adaptor) GetModelList() []string {
// return ModelList
// }
// func (a *Adaptor) GetChannelName() string {
// return "baidu"
// }

View File

@@ -0,0 +1,10 @@
package baidu
var ModelList = []string{
"ERNIE-Bot-4",
"ERNIE-Bot-8K",
"ERNIE-Bot",
"ERNIE-Speed",
"ERNIE-Bot-turbo",
"Embedding-V1",
}

relay/channel/common.go Normal file
View File

@@ -0,0 +1,51 @@
package channel
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
func SetupCommonRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) {
req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
req.Header.Set("Accept", c.Request.Header.Get("Accept"))
if meta.IsStream && c.Request.Header.Get("Accept") == "" {
req.Header.Set("Accept", "text/event-stream")
}
}
func DoRequestHelper(a Adaptor, c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
fullRequestURL, err := a.GetRequestURL(meta)
if err != nil {
return nil, fmt.Errorf("get request url failed: %w", err)
}
req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return nil, fmt.Errorf("new request failed: %w", err)
}
err = a.SetupRequestHeader(c, req, meta)
if err != nil {
return nil, fmt.Errorf("setup request header failed: %w", err)
}
resp, err := DoRequest(c, req)
if err != nil {
return nil, fmt.Errorf("do request failed: %w", err)
}
return resp, nil
}
func DoRequest(c *gin.Context, req *http.Request) (*http.Response, error) {
resp, err := util.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if resp == nil {
return nil, errors.New("resp is nil")
}
_ = req.Body.Close()
_ = c.Request.Body.Close()
return resp, nil
}
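// Inferred from its call sites in this diff rather than copied from the repo,
// the Adaptor interface that DoRequestHelper drives looks roughly like this:
//
// type Adaptor interface {
//     Init(meta *util.RelayMeta)
//     GetRequestURL(meta *util.RelayMeta) (string, error)
//     SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
//     ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
//     DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
//     DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode)
//     GetModelList() []string
//     GetChannelName() string
// }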

View File

@@ -0,0 +1,66 @@
package gemini
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common/helper"
channelhelper "github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Init(meta *util.RelayMeta) {
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
version := helper.AssignOrDefault(meta.APIVersion, "v1")
action := "generateContent"
if meta.IsStream {
action = "streamGenerateContent"
}
return fmt.Sprintf("%s/%s/models/%s:%s", meta.BaseURL, version, meta.ActualModelName, action), nil
}
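// For example, a streaming gemini-pro call against the default base URL and
// default API version resolves to:
// https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent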
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channelhelper.SetupCommonRequestHeader(c, req, meta)
req.Header.Set("x-goog-api-key", meta.APIKey)
return nil
}
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return ConvertRequest(*request), nil
}
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channelhelper.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
var responseText string
err, responseText = StreamHandler(c, resp)
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
} else {
err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
}
return
}
func (a *Adaptor) GetModelList() []string {
return ModelList
}
func (a *Adaptor) GetChannelName() string {
return "google gemini"
}

View File

@@ -0,0 +1,6 @@
package gemini
var ModelList = []string{
"gemini-pro",
"gemini-pro-vision",
}

View File

@@ -1,4 +1,4 @@
package google
package gemini
import (
"bufio"
@@ -12,6 +12,7 @@ import (
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strings"
@@ -22,14 +23,14 @@ import (
// https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn
const (
GeminiVisionMaxImageNum = 16
VisionMaxImageNum = 16
)
// Setting safety to the lowest possible values since Gemini is already powerless enough
func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRequest {
geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
SafetySettings: []GeminiChatSafetySettings{
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
geminiRequest := ChatRequest{
Contents: make([]ChatContent, 0, len(textRequest.Messages)),
SafetySettings: []ChatSafetySettings{
{
Category: "HARM_CATEGORY_HARASSMENT",
Threshold: config.GeminiSafetySetting,
@@ -47,14 +48,14 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
Threshold: config.GeminiSafetySetting,
},
},
GenerationConfig: GeminiChatGenerationConfig{
GenerationConfig: ChatGenerationConfig{
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
MaxOutputTokens: textRequest.MaxTokens,
},
}
if textRequest.Functions != nil {
geminiRequest.Tools = []GeminiChatTools{
geminiRequest.Tools = []ChatTools{
{
FunctionDeclarations: textRequest.Functions,
},
@@ -62,28 +63,27 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
}
shouldAddDummyModelMessage := false
for _, message := range textRequest.Messages {
content := GeminiChatContent{
content := ChatContent{
Role: message.Role,
Parts: []GeminiPart{
Parts: []Part{
{
Text: message.StringContent(),
},
},
}
openaiContent := message.ParseContent()
var parts []GeminiPart
var parts []Part
imageNum := 0
for _, part := range openaiContent {
if part.Type == openai.ContentTypeText {
parts = append(parts, GeminiPart{
if part.Type == model.ContentTypeText {
parts = append(parts, Part{
Text: part.Text,
})
} else if part.Type == openai.ContentTypeImageURL {
} else if part.Type == model.ContentTypeImageURL {
imageNum += 1
if imageNum > GeminiVisionMaxImageNum {
if imageNum > VisionMaxImageNum {
continue
}
mimeType, data, err := image.GetImageFromUrl(part.ImageURL.Url)
if err != nil {
logger.Warn(context.TODO(),
@@ -91,8 +91,8 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
continue
}
parts = append(parts, GeminiPart{
InlineData: &GeminiInlineData{
parts = append(parts, Part{
InlineData: &InlineData{
MimeType: mimeType,
Data: data,
},
@@ -116,9 +116,9 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
// If a system message is the last message, we need to add a dummy model message to make gemini happy
if shouldAddDummyModelMessage {
geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
geminiRequest.Contents = append(geminiRequest.Contents, ChatContent{
Role: "model",
Parts: []GeminiPart{
Parts: []Part{
{
Text: "Okay",
},
@@ -131,12 +131,12 @@ func ConvertGeminiRequest(textRequest openai.GeneralOpenAIRequest) *GeminiChatRe
return &geminiRequest
}
type GeminiChatResponse struct {
Candidates []GeminiChatCandidate `json:"candidates"`
PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
type ChatResponse struct {
Candidates []ChatCandidate `json:"candidates"`
PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
}
func (g *GeminiChatResponse) GetResponseText() string {
func (g *ChatResponse) GetResponseText() string {
if g == nil {
return ""
}
@@ -146,23 +146,23 @@ func (g *GeminiChatResponse) GetResponseText() string {
return ""
}
type GeminiChatCandidate struct {
Content GeminiChatContent `json:"content"`
FinishReason string `json:"finishReason"`
Index int64 `json:"index"`
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
type ChatCandidate struct {
Content ChatContent `json:"content"`
FinishReason string `json:"finishReason"`
Index int64 `json:"index"`
SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}
type GeminiChatSafetyRating struct {
type ChatSafetyRating struct {
Category string `json:"category"`
Probability string `json:"probability"`
}
type GeminiChatPromptFeedback struct {
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
type ChatPromptFeedback struct {
SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}
func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextResponse {
func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse {
fullTextResponse := openai.TextResponse{
Id: fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
Object: "chat.completion",
@@ -172,7 +172,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
for i, candidate := range response.Candidates {
choice := openai.TextResponseChoice{
Index: i,
Message: openai.Message{
Message: model.Message{
Role: "assistant",
Content: "",
},
@@ -186,7 +186,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *openai.TextRespons
return &fullTextResponse
}
func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai.ChatCompletionsStreamResponse {
func streamResponseGeminiChat2OpenAI(geminiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
choice.Delta.Content = geminiResponse.GetResponseText()
choice.FinishReason = &constant.StopFinishReason
@@ -197,7 +197,7 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *openai
return &response
}
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
responseText := ""
dataChan := make(chan string)
stopChan := make(chan bool)
@@ -267,7 +267,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
return nil, responseText
}
func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -276,14 +276,14 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var geminiResponse GeminiChatResponse
var geminiResponse ChatResponse
err = json.Unmarshal(responseBody, &geminiResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if len(geminiResponse.Candidates) == 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &model.ErrorWithStatusCode{
Error: model.Error{
Message: "No candidates returned",
Type: "server_error",
Param: "",
@@ -293,9 +293,9 @@ func GeminiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
}, nil
}
fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), model)
usage := openai.Usage{
fullTextResponse.Model = modelName
completionTokens := openai.CountTokenText(geminiResponse.GetResponseText(), modelName)
usage := model.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,

View File

@@ -0,0 +1,41 @@
package gemini
type ChatRequest struct {
Contents []ChatContent `json:"contents"`
SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
Tools []ChatTools `json:"tools,omitempty"`
}
type InlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type Part struct {
Text string `json:"text,omitempty"`
InlineData *InlineData `json:"inlineData,omitempty"`
}
type ChatContent struct {
Role string `json:"role,omitempty"`
Parts []Part `json:"parts"`
}
type ChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type ChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type ChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
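For a quick sanity check of the wire format these structs produce, a standalone sketch (local mirrors of the types above, trimmed to the fields the example needs; the prompt text is made up):
package main

import (
	"encoding/json"
	"fmt"
)

type Part struct {
	Text string `json:"text,omitempty"`
}

type ChatContent struct {
	Role  string `json:"role,omitempty"`
	Parts []Part `json:"parts"`
}

type ChatRequest struct {
	Contents []ChatContent `json:"contents"`
}

func main() {
	req := ChatRequest{Contents: []ChatContent{{Role: "user", Parts: []Part{{Text: "Hello"}}}}}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"contents":[{"role":"user","parts":[{"text":"Hello"}]}]}
}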

View File

@@ -1,22 +0,0 @@
package google
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}

View File

@@ -1,80 +0,0 @@
package google
import (
"github.com/songquanpeng/one-api/relay/channel/openai"
)
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
Tools []GeminiChatTools `json:"tools,omitempty"`
}
type GeminiInlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type GeminiPart struct {
Text string `json:"text,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}
type GeminiChatContent struct {
Role string `json:"role,omitempty"`
Parts []GeminiPart `json:"parts"`
}
type GeminiChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type GeminiChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type GeminiChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
type PaLMChatMessage struct {
Author string `json:"author"`
Content string `json:"content"`
}
type PaLMFilter struct {
Reason string `json:"reason"`
Message string `json:"message"`
}
type PaLMPrompt struct {
Messages []PaLMChatMessage `json:"messages"`
}
type PaLMChatRequest struct {
Prompt PaLMPrompt `json:"prompt"`
Temperature float64 `json:"temperature,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK int `json:"topK,omitempty"`
}
type PaLMError struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
type PaLMChatResponse struct {
Candidates []PaLMChatMessage `json:"candidates"`
Messages []openai.Message `json:"messages"`
Filters []PaLMFilter `json:"filters"`
Error PaLMError `json:"error"`
}

View File

@@ -2,14 +2,19 @@ package channel
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor interface {
GetRequestURL() string
Auth(c *gin.Context) error
ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error)
DoRequest(request *openai.GeneralOpenAIRequest) error
DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error)
Init(meta *util.RelayMeta)
GetRequestURL(meta *util.RelayMeta) (string, error)
SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode)
GetModelList() []string
GetChannelName() string
}
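A rough sketch (not part of this commit; relayWithAdaptor and its error codes are illustrative) of how a relay controller could drive the widened interface end to end — Init, ConvertRequest, DoRequest (which is expected to resolve the URL and headers via DoRequestHelper), then DoResponse:
package relay

import (
	"bytes"
	"encoding/json"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/channel"
	"github.com/songquanpeng/one-api/relay/channel/openai"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

func relayWithAdaptor(c *gin.Context, a channel.Adaptor, meta *util.RelayMeta, req *model.GeneralOpenAIRequest) (*model.Usage, *model.ErrorWithStatusCode) {
	a.Init(meta)
	converted, err := a.ConvertRequest(c, meta.Mode, req)
	if err != nil {
		return nil, openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
	}
	body, err := json.Marshal(converted)
	if err != nil {
		return nil, openai.ErrorWrapper(err, "marshal_request_failed", http.StatusInternalServerError)
	}
	resp, err := a.DoRequest(c, meta, bytes.NewReader(body))
	if err != nil {
		return nil, openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}
	return a.DoResponse(c, resp, meta)
}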

View File

@@ -0,0 +1,7 @@
package moonshot
var ModelList = []string{
"moonshot-v1-8k",
"moonshot-v1-32k",
"moonshot-v1-128k",
}

View File

@@ -1,21 +1,103 @@
package openai
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/ai360"
"github.com/songquanpeng/one-api/relay/channel/moonshot"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
"strings"
)
type Adaptor struct {
ChannelType int
}
func (a *Adaptor) Auth(c *gin.Context) error {
func (a *Adaptor) Init(meta *util.RelayMeta) {
a.ChannelType = meta.ChannelType
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
if meta.ChannelType == common.ChannelTypeAzure {
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
requestURL := strings.Split(meta.RequestURLPath, "?")[0]
requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.APIVersion)
task := strings.TrimPrefix(requestURL, "/v1/")
model_ := meta.ActualModelName
model_ = strings.Replace(model_, ".", "", -1)
// https://github.com/songquanpeng/one-api/issues/67
model_ = strings.TrimSuffix(model_, "-0301")
model_ = strings.TrimSuffix(model_, "-0314")
model_ = strings.TrimSuffix(model_, "-0613")
requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
return util.GetFullRequestURL(meta.BaseURL, requestURL, meta.ChannelType), nil
}
return util.GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
}
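For illustration (the deployment values are assumed): with APIVersion "2023-05-15", RequestURLPath "/v1/chat/completions" and ActualModelName "gpt-3.5-turbo-0613", the rewrite above yields "/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15". A standalone trace of just the string handling:
package main

import (
	"fmt"
	"strings"
)

func main() {
	requestURLPath := "/v1/chat/completions"
	apiVersion := "2023-05-15"
	actualModelName := "gpt-3.5-turbo-0613"

	requestURL := strings.Split(requestURLPath, "?")[0]
	requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
	task := strings.TrimPrefix(requestURL, "/v1/")
	model_ := strings.Replace(actualModelName, ".", "", -1) // "gpt-35-turbo-0613"
	model_ = strings.TrimSuffix(model_, "-0613")            // "gpt-35-turbo"
	fmt.Printf("/openai/deployments/%s/%s\n", model_, task)
	// /openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15
}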
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channel.SetupCommonRequestHeader(c, req, meta)
if meta.ChannelType == common.ChannelTypeAzure {
req.Header.Set("api-key", meta.APIKey)
return nil
}
req.Header.Set("Authorization", "Bearer "+meta.APIKey)
if meta.ChannelType == common.ChannelTypeOpenRouter {
req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
req.Header.Set("X-Title", "One API")
}
return nil
}
func (a *Adaptor) ConvertRequest(request *GeneralOpenAIRequest) (any, error) {
return nil, nil
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return request, nil
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*ErrorWithStatusCode, *Usage, error) {
return nil, nil, nil
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channel.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
var responseText string
err, responseText = StreamHandler(c, resp, meta.Mode)
usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
} else {
err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
}
return
}
func (a *Adaptor) GetModelList() []string {
switch a.ChannelType {
case common.ChannelType360:
return ai360.ModelList
case common.ChannelTypeMoonshot:
return moonshot.ModelList
default:
return ModelList
}
}
func (a *Adaptor) GetChannelName() string {
switch a.ChannelType {
case common.ChannelTypeAzure:
return "azure"
case common.ChannelType360:
return "360"
case common.ChannelTypeMoonshot:
return "moonshot"
default:
return "openai"
}
}

View File

@@ -0,0 +1,19 @@
package openai
var ModelList = []string{
"gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
"text-curie-001", "text-babbage-001", "text-ada-001", "text-davinci-002", "text-davinci-003",
"text-moderation-latest", "text-moderation-stable",
"text-davinci-edit-001",
"davinci-002", "babbage-002",
"dall-e-2", "dall-e-3",
"whisper-1",
"tts-1", "tts-1-1106", "tts-1-hd", "tts-1-hd-1106",
}

View File

@@ -0,0 +1,11 @@
package openai
import "github.com/songquanpeng/one-api/relay/model"
func ResponseText2Usage(responseText string, modelName string, promptTokens int) *model.Usage {
usage := &model.Usage{}
usage.PromptTokens = promptTokens
usage.CompletionTokens = CountTokenText(responseText, modelName)
usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
return usage
}
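Example use (the completion count comes from the tiktoken encoder, so the exact numbers are illustrative):
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/channel/openai"
)

func main() {
	// 12 is a made-up prompt token count; it would normally come from CountTokenMessages.
	usage := openai.ResponseText2Usage("Hello there!", "gpt-3.5-turbo", 12)
	fmt.Println(usage.PromptTokens, usage.CompletionTokens, usage.TotalTokens)
}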

View File

@@ -8,12 +8,13 @@ import (
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strings"
)
func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWithStatusCode, string) {
func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string) {
responseText := ""
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
@@ -90,7 +91,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*ErrorWi
return nil, responseText
}
func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*ErrorWithStatusCode, *Usage) {
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
var textResponse SlimTextResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
@@ -105,7 +106,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
return ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if textResponse.Error.Type != "" {
return &ErrorWithStatusCode{
return &model.ErrorWithStatusCode{
Error: textResponse.Error,
StatusCode: resp.StatusCode,
}, nil
@@ -133,9 +134,9 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, model string
if textResponse.Usage.TotalTokens == 0 {
completionTokens := 0
for _, choice := range textResponse.Choices {
completionTokens += CountTokenText(choice.Message.StringContent(), model)
completionTokens += CountTokenText(choice.Message.StringContent(), modelName)
}
textResponse.Usage = Usage{
textResponse.Usage = model.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,

View File

@@ -1,15 +1,6 @@
package openai
type Message struct {
Role string `json:"role"`
Content any `json:"content"`
Name *string `json:"name,omitempty"`
}
type ImageURL struct {
Url string `json:"url,omitempty"`
Detail string `json:"detail,omitempty"`
}
import "github.com/songquanpeng/one-api/relay/model"
type TextContent struct {
Type string `json:"type,omitempty"`
@@ -17,142 +8,21 @@ type TextContent struct {
}
type ImageContent struct {
Type string `json:"type,omitempty"`
ImageURL *ImageURL `json:"image_url,omitempty"`
}
type OpenAIMessageContent struct {
Type string `json:"type,omitempty"`
Text string `json:"text"`
ImageURL *ImageURL `json:"image_url,omitempty"`
}
func (m Message) IsStringContent() bool {
_, ok := m.Content.(string)
return ok
}
func (m Message) StringContent() string {
content, ok := m.Content.(string)
if ok {
return content
}
contentList, ok := m.Content.([]any)
if ok {
var contentStr string
for _, contentItem := range contentList {
contentMap, ok := contentItem.(map[string]any)
if !ok {
continue
}
if contentMap["type"] == ContentTypeText {
if subStr, ok := contentMap["text"].(string); ok {
contentStr += subStr
}
}
}
return contentStr
}
return ""
}
func (m Message) ParseContent() []OpenAIMessageContent {
var contentList []OpenAIMessageContent
content, ok := m.Content.(string)
if ok {
contentList = append(contentList, OpenAIMessageContent{
Type: ContentTypeText,
Text: content,
})
return contentList
}
anyList, ok := m.Content.([]any)
if ok {
for _, contentItem := range anyList {
contentMap, ok := contentItem.(map[string]any)
if !ok {
continue
}
switch contentMap["type"] {
case ContentTypeText:
if subStr, ok := contentMap["text"].(string); ok {
contentList = append(contentList, OpenAIMessageContent{
Type: ContentTypeText,
Text: subStr,
})
}
case ContentTypeImageURL:
if subObj, ok := contentMap["image_url"].(map[string]any); ok {
contentList = append(contentList, OpenAIMessageContent{
Type: ContentTypeImageURL,
ImageURL: &ImageURL{
Url: subObj["url"].(string),
},
})
}
}
}
return contentList
}
return nil
}
type ResponseFormat struct {
Type string `json:"type,omitempty"`
}
type GeneralOpenAIRequest struct {
Model string `json:"model,omitempty"`
Messages []Message `json:"messages,omitempty"`
Prompt any `json:"prompt,omitempty"`
Stream bool `json:"stream,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
N int `json:"n,omitempty"`
Input any `json:"input,omitempty"`
Instruction string `json:"instruction,omitempty"`
Size string `json:"size,omitempty"`
Functions any `json:"functions,omitempty"`
FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
PresencePenalty float64 `json:"presence_penalty,omitempty"`
ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
Seed float64 `json:"seed,omitempty"`
Tools any `json:"tools,omitempty"`
ToolChoice any `json:"tool_choice,omitempty"`
User string `json:"user,omitempty"`
}
func (r GeneralOpenAIRequest) ParseInput() []string {
if r.Input == nil {
return nil
}
var input []string
switch r.Input.(type) {
case string:
input = []string{r.Input.(string)}
case []any:
input = make([]string, 0, len(r.Input.([]any)))
for _, item := range r.Input.([]any) {
if str, ok := item.(string); ok {
input = append(input, str)
}
}
}
return input
Type string `json:"type,omitempty"`
ImageURL *model.ImageURL `json:"image_url,omitempty"`
}
type ChatRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
MaxTokens int `json:"max_tokens"`
Model string `json:"model"`
Messages []model.Message `json:"messages"`
MaxTokens int `json:"max_tokens"`
}
type TextRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
Prompt string `json:"prompt"`
MaxTokens int `json:"max_tokens"`
Model string `json:"model"`
Messages []model.Message `json:"messages"`
Prompt string `json:"prompt"`
MaxTokens int `json:"max_tokens"`
//Stream bool `json:"stream"`
}
@@ -201,48 +71,30 @@ type TextToSpeechRequest struct {
ResponseFormat string `json:"response_format"`
}
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}
type UsageOrResponseText struct {
*Usage
*model.Usage
ResponseText string
}
type Error struct {
Message string `json:"message"`
Type string `json:"type"`
Param string `json:"param"`
Code any `json:"code"`
}
type ErrorWithStatusCode struct {
Error
StatusCode int `json:"status_code"`
}
type SlimTextResponse struct {
Choices []TextResponseChoice `json:"choices"`
Usage `json:"usage"`
Error Error `json:"error"`
Choices []TextResponseChoice `json:"choices"`
model.Usage `json:"usage"`
Error model.Error `json:"error"`
}
type TextResponseChoice struct {
Index int `json:"index"`
Message `json:"message"`
FinishReason string `json:"finish_reason"`
Index int `json:"index"`
model.Message `json:"message"`
FinishReason string `json:"finish_reason"`
}
type TextResponse struct {
Id string `json:"id"`
Model string `json:"model,omitempty"`
Object string `json:"object"`
Created int64 `json:"created"`
Choices []TextResponseChoice `json:"choices"`
Usage `json:"usage"`
Id string `json:"id"`
Model string `json:"model,omitempty"`
Object string `json:"object"`
Created int64 `json:"created"`
Choices []TextResponseChoice `json:"choices"`
model.Usage `json:"usage"`
}
type EmbeddingResponseItem struct {
@@ -252,10 +104,10 @@ type EmbeddingResponseItem struct {
}
type EmbeddingResponse struct {
Object string `json:"object"`
Data []EmbeddingResponseItem `json:"data"`
Model string `json:"model"`
Usage `json:"usage"`
Object string `json:"object"`
Data []EmbeddingResponseItem `json:"data"`
Model string `json:"model"`
model.Usage `json:"usage"`
}
type ImageResponse struct {

View File

@@ -8,6 +8,7 @@ import (
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/image"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/model"
"math"
"strings"
)
@@ -63,7 +64,7 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
return len(tokenEncoder.Encode(text, nil, nil))
}
func CountTokenMessages(messages []Message, model string) int {
func CountTokenMessages(messages []model.Message, model string) int {
tokenEncoder := getTokenEncoder(model)
// Reference:
// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

View File

@@ -1,12 +1,14 @@
package openai
func ErrorWrapper(err error, code string, statusCode int) *ErrorWithStatusCode {
Error := Error{
import "github.com/songquanpeng/one-api/relay/model"
func ErrorWrapper(err error, code string, statusCode int) *model.ErrorWithStatusCode {
Error := model.Error{
Message: err.Error(),
Type: "one_api_error",
Code: code,
}
return &ErrorWithStatusCode{
return &model.ErrorWithStatusCode{
Error: Error,
StatusCode: statusCode,
}

View File

@@ -0,0 +1,60 @@
package palm
import (
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
)
type Adaptor struct {
}
func (a *Adaptor) Init(meta *util.RelayMeta) {
}
func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
return fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", meta.BaseURL), nil
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
channel.SetupCommonRequestHeader(c, req, meta)
req.Header.Set("x-goog-api-key", meta.APIKey)
return nil
}
func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return ConvertRequest(*request), nil
}
func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
return channel.DoRequestHelper(a, c, meta, requestBody)
}
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
if meta.IsStream {
var responseText string
err, responseText = StreamHandler(c, resp)
usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
} else {
err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
}
return
}
func (a *Adaptor) GetModelList() []string {
return ModelList
}
func (a *Adaptor) GetChannelName() string {
return "google palm"
}

View File

@@ -0,0 +1,5 @@
package palm
var ModelList = []string{
"PaLM-2",
}

View File

@@ -0,0 +1,40 @@
package palm
import (
"github.com/songquanpeng/one-api/relay/model"
)
type ChatMessage struct {
Author string `json:"author"`
Content string `json:"content"`
}
type Filter struct {
Reason string `json:"reason"`
Message string `json:"message"`
}
type Prompt struct {
Messages []ChatMessage `json:"messages"`
}
type ChatRequest struct {
Prompt Prompt `json:"prompt"`
Temperature float64 `json:"temperature,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK int `json:"topK,omitempty"`
}
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
type ChatResponse struct {
Candidates []ChatMessage `json:"candidates"`
Messages []model.Message `json:"messages"`
Filters []Filter `json:"filters"`
Error Error `json:"error"`
}

View File

@@ -1,4 +1,4 @@
package google
package palm
import (
"encoding/json"
@@ -9,6 +9,7 @@ import (
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
)
@@ -16,10 +17,10 @@ import (
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body
func ConvertPaLMRequest(textRequest openai.GeneralOpenAIRequest) *PaLMChatRequest {
palmRequest := PaLMChatRequest{
Prompt: PaLMPrompt{
Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)),
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
palmRequest := ChatRequest{
Prompt: Prompt{
Messages: make([]ChatMessage, 0, len(textRequest.Messages)),
},
Temperature: textRequest.Temperature,
CandidateCount: textRequest.N,
@@ -27,7 +28,7 @@ func ConvertPaLMRequest(textRequest openai.GeneralOpenAIRequest) *PaLMChatReques
TopK: textRequest.MaxTokens,
}
for _, message := range textRequest.Messages {
palmMessage := PaLMChatMessage{
palmMessage := ChatMessage{
Content: message.StringContent(),
}
if message.Role == "user" {
@@ -40,14 +41,14 @@ func ConvertPaLMRequest(textRequest openai.GeneralOpenAIRequest) *PaLMChatReques
return &palmRequest
}
func responsePaLM2OpenAI(response *PaLMChatResponse) *openai.TextResponse {
func responsePaLM2OpenAI(response *ChatResponse) *openai.TextResponse {
fullTextResponse := openai.TextResponse{
Choices: make([]openai.TextResponseChoice, 0, len(response.Candidates)),
}
for i, candidate := range response.Candidates {
choice := openai.TextResponseChoice{
Index: i,
Message: openai.Message{
Message: model.Message{
Role: "assistant",
Content: candidate.Content,
},
@@ -58,7 +59,7 @@ func responsePaLM2OpenAI(response *PaLMChatResponse) *openai.TextResponse {
return &fullTextResponse
}
func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *openai.ChatCompletionsStreamResponse {
func streamResponsePaLM2OpenAI(palmResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
if len(palmResponse.Candidates) > 0 {
choice.Delta.Content = palmResponse.Candidates[0].Content
@@ -71,7 +72,7 @@ func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *openai.ChatCompl
return &response
}
func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
responseText := ""
responseId := fmt.Sprintf("chatcmpl-%s", helper.GetUUID())
createdTime := helper.GetTimestamp()
@@ -90,7 +91,7 @@ func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSt
stopChan <- true
return
}
var palmResponse PaLMChatResponse
var palmResponse ChatResponse
err = json.Unmarshal(responseBody, &palmResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
@@ -130,7 +131,7 @@ func PaLMStreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithSt
return nil, responseText
}
func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*openai.ErrorWithStatusCode, *openai.Usage) {
func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -139,14 +140,14 @@ func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var palmResponse PaLMChatResponse
var palmResponse ChatResponse
err = json.Unmarshal(responseBody, &palmResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
return &model.ErrorWithStatusCode{
Error: model.Error{
Message: palmResponse.Error.Message,
Type: palmResponse.Error.Status,
Param: "",
@@ -156,9 +157,9 @@ func PaLMHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
}, nil
}
fullTextResponse := responsePaLM2OpenAI(&palmResponse)
fullTextResponse.Model = model
completionTokens := openai.CountTokenText(palmResponse.Candidates[0].Content, model)
usage := openai.Usage{
fullTextResponse.Model = modelName
completionTokens := openai.CountTokenText(palmResponse.Candidates[0].Content, modelName)
usage := model.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,

View File

@@ -1,22 +1,76 @@
package tencent
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"net/http"
)
// import (
// "errors"
// "fmt"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/relay/channel"
// "github.com/songquanpeng/one-api/relay/channel/openai"
// "github.com/songquanpeng/one-api/relay/model"
// "github.com/songquanpeng/one-api/relay/util"
// "io"
// "net/http"
// "strings"
// )
type Adaptor struct {
}
// // https://cloud.tencent.com/document/api/1729/101837
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
// type Adaptor struct {
// Sign string
// }
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
// func (a *Adaptor) Init(meta *util.RelayMeta) {
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}
// }
// func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
// return fmt.Sprintf("%s/hyllm/v1/chat/completions", meta.BaseURL), nil
// }
// func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
// channel.SetupCommonRequestHeader(c, req, meta)
// req.Header.Set("Authorization", a.Sign)
// req.Header.Set("X-TC-Action", meta.ActualModelName)
// return nil
// }
// func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
// if request == nil {
// return nil, errors.New("request is nil")
// }
// apiKey := c.Request.Header.Get("Authorization")
// apiKey = strings.TrimPrefix(apiKey, "Bearer ")
// appId, secretId, secretKey, err := ParseConfig(apiKey)
// if err != nil {
// return nil, err
// }
// tencentRequest := ConvertRequest(*request)
// tencentRequest.AppId = appId
// tencentRequest.SecretId = secretId
// // we have to calculate the sign here
// a.Sign = GetSign(*tencentRequest, secretKey)
// return tencentRequest, nil
// }
// func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
// return channel.DoRequestHelper(a, c, meta, requestBody)
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
// if meta.IsStream {
// var responseText string
// err, responseText = StreamHandler(c, resp)
// usage = openai.ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
// } else {
// err, usage = Handler(c, resp)
// }
// return
// }
// func (a *Adaptor) GetModelList() []string {
// return ModelList
// }
// func (a *Adaptor) GetChannelName() string {
// return "tencent"
// }

View File

@@ -0,0 +1,7 @@
package tencent
var ModelList = []string{
"ChatPro",
"ChatStd",
"hunyuan",
}

View File

@@ -1,237 +1,238 @@
package tencent
import (
"bufio"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"io"
"net/http"
"sort"
"strconv"
"strings"
)
// import (
// "bufio"
// "crypto/hmac"
// "crypto/sha1"
// "encoding/base64"
// "encoding/json"
// "errors"
// "fmt"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/common"
// "github.com/songquanpeng/one-api/common/helper"
// "github.com/songquanpeng/one-api/common/logger"
// "github.com/songquanpeng/one-api/relay/channel/openai"
// "github.com/songquanpeng/one-api/relay/constant"
// "github.com/songquanpeng/one-api/relay/model"
// "io"
// "net/http"
// "sort"
// "strconv"
// "strings"
// )
// https://cloud.tencent.com/document/product/1729/97732
// // https://cloud.tencent.com/document/product/1729/97732
func ConvertRequest(request openai.GeneralOpenAIRequest) *ChatRequest {
messages := make([]Message, 0, len(request.Messages))
for i := 0; i < len(request.Messages); i++ {
message := request.Messages[i]
if message.Role == "system" {
messages = append(messages, Message{
Role: "user",
Content: message.StringContent(),
})
messages = append(messages, Message{
Role: "assistant",
Content: "Okay",
})
continue
}
messages = append(messages, Message{
Content: message.StringContent(),
Role: message.Role,
})
}
stream := 0
if request.Stream {
stream = 1
}
return &ChatRequest{
Timestamp: helper.GetTimestamp(),
Expired: helper.GetTimestamp() + 24*60*60,
QueryID: helper.GetUUID(),
Temperature: request.Temperature,
TopP: request.TopP,
Stream: stream,
Messages: messages,
}
}
// func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
// messages := make([]Message, 0, len(request.Messages))
// for i := 0; i < len(request.Messages); i++ {
// message := request.Messages[i]
// if message.Role == "system" {
// messages = append(messages, Message{
// Role: "user",
// Content: message.StringContent(),
// })
// messages = append(messages, Message{
// Role: "assistant",
// Content: "Okay",
// })
// continue
// }
// messages = append(messages, Message{
// Content: message.StringContent(),
// Role: message.Role,
// })
// }
// stream := 0
// if request.Stream {
// stream = 1
// }
// return &ChatRequest{
// Timestamp: helper.GetTimestamp(),
// Expired: helper.GetTimestamp() + 24*60*60,
// QueryID: helper.GetUUID(),
// Temperature: request.Temperature,
// TopP: request.TopP,
// Stream: stream,
// Messages: messages,
// }
// }
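A minimal sketch (local msg type, made-up prompt; not part of this commit) of the system-role flattening in the commented ConvertRequest above — Hunyuan only accepts alternating user/assistant turns, so a system message becomes a user turn followed by a stub "Okay" assistant turn:
package main

import "fmt"

type msg struct {
	Role    string
	Content string
}

func flatten(in []msg) []msg {
	out := make([]msg, 0, len(in))
	for _, m := range in {
		if m.Role == "system" {
			out = append(out, msg{Role: "user", Content: m.Content}, msg{Role: "assistant", Content: "Okay"})
			continue
		}
		out = append(out, m)
	}
	return out
}

func main() {
	fmt.Printf("%+v\n", flatten([]msg{{Role: "system", Content: "Be brief."}, {Role: "user", Content: "Hi"}}))
	// [{Role:user Content:Be brief.} {Role:assistant Content:Okay} {Role:user Content:Hi}]
}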
func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
fullTextResponse := openai.TextResponse{
Object: "chat.completion",
Created: helper.GetTimestamp(),
Usage: response.Usage,
}
if len(response.Choices) > 0 {
choice := openai.TextResponseChoice{
Index: 0,
Message: openai.Message{
Role: "assistant",
Content: response.Choices[0].Messages.Content,
},
FinishReason: response.Choices[0].FinishReason,
}
fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
}
return &fullTextResponse
}
// func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {
// fullTextResponse := openai.TextResponse{
// Object: "chat.completion",
// Created: helper.GetTimestamp(),
// Usage: response.Usage,
// }
// if len(response.Choices) > 0 {
// choice := openai.TextResponseChoice{
// Index: 0,
// Message: model.Message{
// Role: "assistant",
// Content: response.Choices[0].Messages.Content,
// },
// FinishReason: response.Choices[0].FinishReason,
// }
// fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
// }
// return &fullTextResponse
// }
func streamResponseTencent2OpenAI(TencentResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
response := openai.ChatCompletionsStreamResponse{
Object: "chat.completion.chunk",
Created: helper.GetTimestamp(),
Model: "tencent-hunyuan",
}
if len(TencentResponse.Choices) > 0 {
var choice openai.ChatCompletionsStreamResponseChoice
choice.Delta.Content = TencentResponse.Choices[0].Delta.Content
if TencentResponse.Choices[0].FinishReason == "stop" {
choice.FinishReason = &constant.StopFinishReason
}
response.Choices = append(response.Choices, choice)
}
return &response
}
// func streamResponseTencent2OpenAI(TencentResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
// response := openai.ChatCompletionsStreamResponse{
// Object: "chat.completion.chunk",
// Created: helper.GetTimestamp(),
// Model: "tencent-hunyuan",
// }
// if len(TencentResponse.Choices) > 0 {
// var choice openai.ChatCompletionsStreamResponseChoice
// choice.Delta.Content = TencentResponse.Choices[0].Delta.Content
// if TencentResponse.Choices[0].FinishReason == "stop" {
// choice.FinishReason = &constant.StopFinishReason
// }
// response.Choices = append(response.Choices, choice)
// }
// return &response
// }
func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, string) {
var responseText string
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < 5 { // ignore blank line or wrong format
continue
}
if data[:5] != "data:" {
continue
}
data = data[5:]
dataChan <- data
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
var TencentResponse ChatResponse
err := json.Unmarshal([]byte(data), &TencentResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response := streamResponseTencent2OpenAI(&TencentResponse)
if len(response.Choices) != 0 {
responseText += response.Choices[0].Delta.Content
}
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false
}
})
err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}
// func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
// var responseText string
// scanner := bufio.NewScanner(resp.Body)
// scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
// if atEOF && len(data) == 0 {
// return 0, nil, nil
// }
// if i := strings.Index(string(data), "\n"); i >= 0 {
// return i + 1, data[0:i], nil
// }
// if atEOF {
// return len(data), data, nil
// }
// return 0, nil, nil
// })
// dataChan := make(chan string)
// stopChan := make(chan bool)
// go func() {
// for scanner.Scan() {
// data := scanner.Text()
// if len(data) < 5 { // ignore blank line or wrong format
// continue
// }
// if data[:5] != "data:" {
// continue
// }
// data = data[5:]
// dataChan <- data
// }
// stopChan <- true
// }()
// common.SetEventStreamHeaders(c)
// c.Stream(func(w io.Writer) bool {
// select {
// case data := <-dataChan:
// var TencentResponse ChatResponse
// err := json.Unmarshal([]byte(data), &TencentResponse)
// if err != nil {
// logger.SysError("error unmarshalling stream response: " + err.Error())
// return true
// }
// response := streamResponseTencent2OpenAI(&TencentResponse)
// if len(response.Choices) != 0 {
// responseText += response.Choices[0].Delta.Content
// }
// jsonResponse, err := json.Marshal(response)
// if err != nil {
// logger.SysError("error marshalling stream response: " + err.Error())
// return true
// }
// c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
// return true
// case <-stopChan:
// c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
// return false
// }
// })
// err := resp.Body.Close()
// if err != nil {
// return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
// }
// return nil, responseText
// }
func Handler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
var TencentResponse ChatResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
err = json.Unmarshal(responseBody, &TencentResponse)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if TencentResponse.Error.Code != 0 {
return &openai.ErrorWithStatusCode{
Error: openai.Error{
Message: TencentResponse.Error.Message,
Code: TencentResponse.Error.Code,
},
StatusCode: resp.StatusCode,
}, nil
}
fullTextResponse := responseTencent2OpenAI(&TencentResponse)
fullTextResponse.Model = "hunyuan"
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
_, err = c.Writer.Write(jsonResponse)
if err != nil {
return openai.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError), nil
}
return nil, &fullTextResponse.Usage
}
// func Handler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
// var TencentResponse ChatResponse
// responseBody, err := io.ReadAll(resp.Body)
// if err != nil {
// return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
// }
// err = resp.Body.Close()
// if err != nil {
// return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
// }
// err = json.Unmarshal(responseBody, &TencentResponse)
// if err != nil {
// return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
// }
// if TencentResponse.Error.Code != 0 {
// return &model.ErrorWithStatusCode{
// Error: model.Error{
// Message: TencentResponse.Error.Message,
// Code: TencentResponse.Error.Code,
// },
// StatusCode: resp.StatusCode,
// }, nil
// }
// fullTextResponse := responseTencent2OpenAI(&TencentResponse)
// fullTextResponse.Model = "hunyuan"
// jsonResponse, err := json.Marshal(fullTextResponse)
// if err != nil {
// return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
// }
// c.Writer.Header().Set("Content-Type", "application/json")
// c.Writer.WriteHeader(resp.StatusCode)
// _, err = c.Writer.Write(jsonResponse)
// if err != nil {
// return openai.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError), nil
// }
// return nil, &fullTextResponse.Usage
// }
func ParseConfig(config string) (appId int64, secretId string, secretKey string, err error) {
parts := strings.Split(config, "|")
if len(parts) != 3 {
err = errors.New("invalid tencent config")
return
}
appId, err = strconv.ParseInt(parts[0], 10, 64)
secretId = parts[1]
secretKey = parts[2]
return
}
// func ParseConfig(config string) (appId int64, secretId string, secretKey string, err error) {
// parts := strings.Split(config, "|")
// if len(parts) != 3 {
// err = errors.New("invalid tencent config")
// return
// }
// appId, err = strconv.ParseInt(parts[0], 10, 64)
// secretId = parts[1]
// secretKey = parts[2]
// return
// }
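The channel key that ParseConfig expects is "appId|secretId|secretKey"; a short usage fragment (the credentials are made up):
appId, secretId, secretKey, err := ParseConfig("1300000000|AKIDexample|examplekey")
// appId == 1300000000 (must parse as int64), secretId == "AKIDexample",
// secretKey == "examplekey", err == nil; any other shape returns "invalid tencent config".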
func GetSign(req ChatRequest, secretKey string) string {
params := make([]string, 0)
params = append(params, "app_id="+strconv.FormatInt(req.AppId, 10))
params = append(params, "secret_id="+req.SecretId)
params = append(params, "timestamp="+strconv.FormatInt(req.Timestamp, 10))
params = append(params, "query_id="+req.QueryID)
params = append(params, "temperature="+strconv.FormatFloat(req.Temperature, 'f', -1, 64))
params = append(params, "top_p="+strconv.FormatFloat(req.TopP, 'f', -1, 64))
params = append(params, "stream="+strconv.Itoa(req.Stream))
params = append(params, "expired="+strconv.FormatInt(req.Expired, 10))
// func GetSign(req ChatRequest, secretKey string) string {
// params := make([]string, 0)
// params = append(params, "app_id="+strconv.FormatInt(req.AppId, 10))
// params = append(params, "secret_id="+req.SecretId)
// params = append(params, "timestamp="+strconv.FormatInt(req.Timestamp, 10))
// params = append(params, "query_id="+req.QueryID)
// params = append(params, "temperature="+strconv.FormatFloat(req.Temperature, 'f', -1, 64))
// params = append(params, "top_p="+strconv.FormatFloat(req.TopP, 'f', -1, 64))
// params = append(params, "stream="+strconv.Itoa(req.Stream))
// params = append(params, "expired="+strconv.FormatInt(req.Expired, 10))
var messageStr string
for _, msg := range req.Messages {
messageStr += fmt.Sprintf(`{"role":"%s","content":"%s"},`, msg.Role, msg.Content)
}
messageStr = strings.TrimSuffix(messageStr, ",")
params = append(params, "messages=["+messageStr+"]")
// var messageStr string
// for _, msg := range req.Messages {
// messageStr += fmt.Sprintf(`{"role":"%s","content":"%s"},`, msg.Role, msg.Content)
// }
// messageStr = strings.TrimSuffix(messageStr, ",")
// params = append(params, "messages=["+messageStr+"]")
sort.Strings(params)
url := "hunyuan.cloud.tencent.com/hyllm/v1/chat/completions?" + strings.Join(params, "&")
mac := hmac.New(sha1.New, []byte(secretKey))
signURL := url
mac.Write([]byte(signURL))
sign := mac.Sum([]byte(nil))
return base64.StdEncoding.EncodeToString(sign)
}
// sort.Strings(params)
// url := "hunyuan.cloud.tencent.com/hyllm/v1/chat/completions?" + strings.Join(params, "&")
// mac := hmac.New(sha1.New, []byte(secretKey))
// signURL := url
// mac.Write([]byte(signURL))
// sign := mac.Sum([]byte(nil))
// return base64.StdEncoding.EncodeToString(sign)
// }

View File

@@ -1,63 +1,63 @@
package tencent
import (
"github.com/songquanpeng/one-api/relay/channel/openai"
)
// import (
// "github.com/songquanpeng/one-api/relay/model"
// )
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// type Message struct {
// Role string `json:"role"`
// Content string `json:"content"`
// }
type ChatRequest struct {
AppId int64 `json:"app_id"` // APPID of the Tencent Cloud account
SecretId string `json:"secret_id"` // SecretId from the official console
// Timestamp: the current UNIX timestamp in seconds, recording when the API request was made.
// e.g. 1529223702; if it differs too much from the current time, a signature-expired error is raised
Timestamp int64 `json:"timestamp"`
// Expired: when the signature expires, as a UNIX-epoch timestamp in seconds;
// Expired must be greater than Timestamp, and Expired-Timestamp must be less than 90 days
Expired int64 `json:"expired"`
QueryID string `json:"query_id"` // request id, used for troubleshooting
// Temperature: higher values make the output more random, lower values more focused and deterministic.
// Default 1.0, range [0.0, 2.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
// Set either this or top_p, not both.
Temperature float64 `json:"temperature"`
// TopP: controls output diversity; the larger the value, the more diverse the generated text.
// Default 1.0, range [0.0, 1.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
// Set either this or temperature, not both.
TopP float64 `json:"top_p"`
// Stream: 0 = synchronous, 1 = streaming (default, protocol: SSE).
// Synchronous requests time out after 60s; streaming is recommended for longer content.
Stream int `json:"stream"`
// Messages: conversation turns, at most 40, ordered in the array from oldest to newest.
// Total input content is capped at 3000 tokens.
Messages []Message `json:"messages"`
}
// type ChatRequest struct {
// AppId int64 `json:"app_id"` // APPID of the Tencent Cloud account
// SecretId string `json:"secret_id"` // SecretId from the official console
// // Timestamp: the current UNIX timestamp in seconds, recording when the API request was made.
// // e.g. 1529223702; if it differs too much from the current time, a signature-expired error is raised
// Timestamp int64 `json:"timestamp"`
// // Expired: when the signature expires, as a UNIX-epoch timestamp in seconds;
// // Expired must be greater than Timestamp, and Expired-Timestamp must be less than 90 days
// Expired int64 `json:"expired"`
// QueryID string `json:"query_id"` // request id, used for troubleshooting
// // Temperature: higher values make the output more random, lower values more focused and deterministic.
// // Default 1.0, range [0.0, 2.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
// // Set either this or top_p, not both.
// Temperature float64 `json:"temperature"`
// // TopP: controls output diversity; the larger the value, the more diverse the generated text.
// // Default 1.0, range [0.0, 1.0]; avoid changing it unless necessary, as unreasonable values hurt quality.
// // Set either this or temperature, not both.
// TopP float64 `json:"top_p"`
// // Stream: 0 = synchronous, 1 = streaming (default, protocol: SSE).
// // Synchronous requests time out after 60s; streaming is recommended for longer content.
// Stream int `json:"stream"`
// // Messages: conversation turns, at most 40, ordered in the array from oldest to newest.
// // Total input content is capped at 3000 tokens.
// Messages []Message `json:"messages"`
// }
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
}
// type Error struct {
// Code int `json:"code"`
// Message string `json:"message"`
// }
type Usage struct {
InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"`
TotalTokens int `json:"total_tokens"`
}
// type Usage struct {
// InputTokens int `json:"input_tokens"`
// OutputTokens int `json:"output_tokens"`
// TotalTokens int `json:"total_tokens"`
// }
type ResponseChoices struct {
FinishReason string `json:"finish_reason,omitempty"` // streaming end flag; "stop" marks the final chunk
Messages Message `json:"messages,omitempty"` // content returned in synchronous mode, null in streaming mode; output content is capped at 1024 tokens
Delta Message `json:"delta,omitempty"` // content returned in streaming mode, null in synchronous mode; output content is capped at 1024 tokens
}
// type ResponseChoices struct {
// FinishReason string `json:"finish_reason,omitempty"` // streaming end flag; "stop" marks the final chunk
// Messages Message `json:"messages,omitempty"` // content returned in synchronous mode, null in streaming mode; output content is capped at 1024 tokens
// Delta Message `json:"delta,omitempty"` // content returned in streaming mode, null in synchronous mode; output content is capped at 1024 tokens
// }
type ChatResponse struct {
Choices []ResponseChoices `json:"choices,omitempty"` // results
Created string `json:"created,omitempty"` // unix timestamp as a string
Id string `json:"id,omitempty"` // conversation id
Usage openai.Usage `json:"usage,omitempty"` // token counts
Error Error `json:"error,omitempty"` // error info; note: this field may be null, meaning no valid value was obtained
Note string `json:"note,omitempty"` // remarks
ReqID string `json:"req_id,omitempty"` // unique request id, returned with every request; include it when reporting issues with the API
}
// type ChatResponse struct {
// Choices []ResponseChoices `json:"choices,omitempty"` // results
// Created string `json:"created,omitempty"` // unix timestamp as a string
// Id string `json:"id,omitempty"` // conversation id
// Usage model.Usage `json:"usage,omitempty"` // token counts
// Error Error `json:"error,omitempty"` // error info; note: this field may be null, meaning no valid value was obtained
// Note string `json:"note,omitempty"` // remarks
// ReqID string `json:"req_id,omitempty"` // unique request id, returned with every request; include it when reporting issues with the API
// }

View File

@@ -1,22 +1,70 @@
package xunfei
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"net/http"
)
// import (
// "errors"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/relay/channel"
// "github.com/songquanpeng/one-api/relay/channel/openai"
// "github.com/songquanpeng/one-api/relay/model"
// "github.com/songquanpeng/one-api/relay/util"
// "io"
// "net/http"
// "strings"
// )
type Adaptor struct {
}
// type Adaptor struct {
// request *model.GeneralOpenAIRequest
// }
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
// func (a *Adaptor) Init(meta *util.RelayMeta) {
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
// }
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}
// func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
// return "", nil
// }
// func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
// channel.SetupCommonRequestHeader(c, req, meta)
// // check DoResponse for auth part
// return nil
// }
// func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
// if request == nil {
// return nil, errors.New("request is nil")
// }
// a.request = request
// return nil, nil
// }
// func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
// // xunfei's request is not an HTTP request, so we don't need to do anything here
// dummyResp := &http.Response{}
// dummyResp.StatusCode = http.StatusOK
// return dummyResp, nil
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
// splits := strings.Split(meta.APIKey, "|")
// if len(splits) != 3 {
// return nil, openai.ErrorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
// }
// if a.request == nil {
// return nil, openai.ErrorWrapper(errors.New("request is nil"), "request_is_nil", http.StatusBadRequest)
// }
// if meta.IsStream {
// err, usage = StreamHandler(c, *a.request, splits[0], splits[1], splits[2])
// } else {
// err, usage = Handler(c, *a.request, splits[0], splits[1], splits[2])
// }
// return
// }
// func (a *Adaptor) GetModelList() []string {
// return ModelList
// }
// func (a *Adaptor) GetChannelName() string {
// return "xunfei"
// }

View File

@@ -0,0 +1,9 @@
package xunfei
var ModelList = []string{
"SparkDesk",
"SparkDesk-v1.1",
"SparkDesk-v2.1",
"SparkDesk-v3.1",
"SparkDesk-v3.5",
}

View File

@@ -1,61 +1,61 @@
package xunfei
import (
"github.com/songquanpeng/one-api/relay/channel/openai"
)
// import (
// "github.com/songquanpeng/one-api/relay/model"
// )
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// type Message struct {
// Role string `json:"role"`
// Content string `json:"content"`
// }
type ChatRequest struct {
Header struct {
AppId string `json:"app_id"`
} `json:"header"`
Parameter struct {
Chat struct {
Domain string `json:"domain,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopK int `json:"top_k,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Auditing bool `json:"auditing,omitempty"`
} `json:"chat"`
} `json:"parameter"`
Payload struct {
Message struct {
Text []Message `json:"text"`
} `json:"message"`
} `json:"payload"`
}
// type ChatRequest struct {
// Header struct {
// AppId string `json:"app_id"`
// } `json:"header"`
// Parameter struct {
// Chat struct {
// Domain string `json:"domain,omitempty"`
// Temperature float64 `json:"temperature,omitempty"`
// TopK int `json:"top_k,omitempty"`
// MaxTokens int `json:"max_tokens,omitempty"`
// Auditing bool `json:"auditing,omitempty"`
// } `json:"chat"`
// } `json:"parameter"`
// Payload struct {
// Message struct {
// Text []Message `json:"text"`
// } `json:"message"`
// } `json:"payload"`
// }
type ChatResponseTextItem struct {
Content string `json:"content"`
Role string `json:"role"`
Index int `json:"index"`
}
// type ChatResponseTextItem struct {
// Content string `json:"content"`
// Role string `json:"role"`
// Index int `json:"index"`
// }
type ChatResponse struct {
Header struct {
Code int `json:"code"`
Message string `json:"message"`
Sid string `json:"sid"`
Status int `json:"status"`
} `json:"header"`
Payload struct {
Choices struct {
Status int `json:"status"`
Seq int `json:"seq"`
Text []ChatResponseTextItem `json:"text"`
} `json:"choices"`
Usage struct {
//Text struct {
// QuestionTokens string `json:"question_tokens"`
// PromptTokens string `json:"prompt_tokens"`
// CompletionTokens string `json:"completion_tokens"`
// TotalTokens string `json:"total_tokens"`
//} `json:"text"`
Text openai.Usage `json:"text"`
} `json:"usage"`
} `json:"payload"`
}
// type ChatResponse struct {
// Header struct {
// Code int `json:"code"`
// Message string `json:"message"`
// Sid string `json:"sid"`
// Status int `json:"status"`
// } `json:"header"`
// Payload struct {
// Choices struct {
// Status int `json:"status"`
// Seq int `json:"seq"`
// Text []ChatResponseTextItem `json:"text"`
// } `json:"choices"`
// Usage struct {
// //Text struct {
// // QuestionTokens string `json:"question_tokens"`
// // PromptTokens string `json:"prompt_tokens"`
// // CompletionTokens string `json:"completion_tokens"`
// // TotalTokens string `json:"total_tokens"`
// //} `json:"text"`
// Text model.Usage `json:"text"`
// } `json:"usage"`
// } `json:"payload"`
// }

View File

@@ -1,22 +1,62 @@
package zhipu
import (
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/channel/openai"
"net/http"
)
// import (
// "errors"
// "fmt"
// "github.com/gin-gonic/gin"
// "github.com/songquanpeng/one-api/relay/channel"
// "github.com/songquanpeng/one-api/relay/model"
// "github.com/songquanpeng/one-api/relay/util"
// "io"
// "net/http"
// )
type Adaptor struct {
}
// type Adaptor struct {
// }
func (a *Adaptor) Auth(c *gin.Context) error {
return nil
}
// func (a *Adaptor) Init(meta *util.RelayMeta) {
func (a *Adaptor) ConvertRequest(request *openai.GeneralOpenAIRequest) (any, error) {
return nil, nil
}
// }
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage, error) {
return nil, nil, nil
}
// func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
// method := "invoke"
// if meta.IsStream {
// method = "sse-invoke"
// }
// return fmt.Sprintf("%s/api/paas/v3/model-api/%s/%s", meta.BaseURL, meta.ActualModelName, method), nil
// }
// func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
// channel.SetupCommonRequestHeader(c, req, meta)
// token := GetToken(meta.APIKey)
// req.Header.Set("Authorization", token)
// return nil
// }
// func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
// if request == nil {
// return nil, errors.New("request is nil")
// }
// return ConvertRequest(*request), nil
// }
// func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error) {
// return channel.DoRequestHelper(a, c, meta, requestBody)
// }
// func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
// if meta.IsStream {
// err, usage = StreamHandler(c, resp)
// } else {
// err, usage = Handler(c, resp)
// }
// return
// }
// func (a *Adaptor) GetModelList() []string {
// return ModelList
// }
// func (a *Adaptor) GetChannelName() string {
// return "zhipu"
// }

View File

@@ -0,0 +1,5 @@
package zhipu
// var ModelList = []string{
// "chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite",
// }

View File

@@ -1,46 +1,46 @@
package zhipu
import (
"github.com/songquanpeng/one-api/relay/channel/openai"
"time"
)
// import (
// "github.com/songquanpeng/one-api/relay/model"
// "time"
// )
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// type Message struct {
// Role string `json:"role"`
// Content string `json:"content"`
// }
type Request struct {
Prompt []Message `json:"prompt"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
RequestId string `json:"request_id,omitempty"`
Incremental bool `json:"incremental,omitempty"`
}
// type Request struct {
// Prompt []Message `json:"prompt"`
// Temperature float64 `json:"temperature,omitempty"`
// TopP float64 `json:"top_p,omitempty"`
// RequestId string `json:"request_id,omitempty"`
// Incremental bool `json:"incremental,omitempty"`
// }
type ResponseData struct {
TaskId string `json:"task_id"`
RequestId string `json:"request_id"`
TaskStatus string `json:"task_status"`
Choices []Message `json:"choices"`
openai.Usage `json:"usage"`
}
// type ResponseData struct {
// TaskId string `json:"task_id"`
// RequestId string `json:"request_id"`
// TaskStatus string `json:"task_status"`
// Choices []Message `json:"choices"`
// model.Usage `json:"usage"`
// }
type Response struct {
Code int `json:"code"`
Msg string `json:"msg"`
Success bool `json:"success"`
Data ResponseData `json:"data"`
}
// type Response struct {
// Code int `json:"code"`
// Msg string `json:"msg"`
// Success bool `json:"success"`
// Data ResponseData `json:"data"`
// }
type StreamMetaResponse struct {
RequestId string `json:"request_id"`
TaskId string `json:"task_id"`
TaskStatus string `json:"task_status"`
openai.Usage `json:"usage"`
}
// type StreamMetaResponse struct {
// RequestId string `json:"request_id"`
// TaskId string `json:"task_id"`
// TaskStatus string `json:"task_status"`
// model.Usage `json:"usage"`
// }
type tokenData struct {
Token string
ExpiryTime time.Time
}
// type tokenData struct {
// Token string
// ExpiryTime time.Time
// }
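tokenData above backs Zhipu's per-key token cache; GetToken (referenced by the commented-out adaptor earlier) presumably returns a cached token while it is still valid and re-signs only on expiry. A minimal sketch of such a cache, under the assumption of a sync.Map keyed by API key, with the actual signing step left abstract:

package zhipu

import (
	"sync"
	"time"
)

var tokens sync.Map // API key -> tokenData

// getCachedToken is a hypothetical helper: return the cached token for the
// key while it has not expired, otherwise sign a fresh one (sign is an
// assumed callback, not part of this file) and cache it with its TTL.
func getCachedToken(apiKey string, sign func(string) (token string, ttl time.Duration)) string {
	if v, ok := tokens.Load(apiKey); ok {
		if td := v.(tokenData); time.Now().Before(td.ExpiryTime) {
			return td.Token
		}
	}
	token, ttl := sign(apiKey)
	tokens.Store(apiKey, tokenData{Token: token, ExpiryTime: time.Now().Add(ttl)})
	return token
}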

View File

@@ -6,7 +6,7 @@ import (
const (
APITypeOpenAI = iota
APITypeClaude
APITypeAnthropic
APITypePaLM
APITypeBaidu
APITypeZhipu
@@ -15,13 +15,15 @@ const (
APITypeAIProxyLibrary
APITypeTencent
APITypeGemini
APITypeDummy // this one is only for count, do not add any channel after this
)
func ChannelType2APIType(channelType int) int {
apiType := APITypeOpenAI
switch channelType {
case common.ChannelTypeAnthropic:
apiType = APITypeClaude
apiType = APITypeAnthropic
case common.ChannelTypeBaidu:
apiType = APITypeBaidu
case common.ChannelTypePaLM:
@@ -41,29 +43,3 @@ func ChannelType2APIType(channelType int) int {
}
return apiType
}
//func GetAdaptor(apiType int) channel.Adaptor {
// switch apiType {
// case APITypeOpenAI:
// return &openai.Adaptor{}
// case APITypeClaude:
// return &anthropic.Adaptor{}
// case APITypePaLM:
// return &google.Adaptor{}
// case APITypeZhipu:
// return &baidu.Adaptor{}
// case APITypeBaidu:
// return &baidu.Adaptor{}
// case APITypeAli:
// return &ali.Adaptor{}
// case APITypeXunfei:
// return &xunfei.Adaptor{}
// case APITypeAIProxyLibrary:
// return &aiproxy.Adaptor{}
// case APITypeTencent:
// return &tencent.Adaptor{}
// case APITypeGemini:
// return &google.Adaptor{}
// }
// return nil
//}

View File

@@ -14,13 +14,14 @@ import (
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
"strings"
)
func RelayAudioHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
func RelayAudioHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
audioModel := "whisper-1"
tokenId := c.GetInt("token_id")

View File

@@ -11,14 +11,14 @@ import (
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"math"
"net/http"
)
func getAndValidateTextRequest(c *gin.Context, relayMode int) (*openai.GeneralOpenAIRequest, error) {
textRequest := &openai.GeneralOpenAIRequest{}
func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.GeneralOpenAIRequest, error) {
textRequest := &relaymodel.GeneralOpenAIRequest{}
err := common.UnmarshalBodyReusable(c, textRequest)
if err != nil {
return nil, err
@@ -36,7 +36,7 @@ func getAndValidateTextRequest(c *gin.Context, relayMode int) (*openai.GeneralOp
return textRequest, nil
}
func getPromptTokens(textRequest *openai.GeneralOpenAIRequest, relayMode int) int {
func getPromptTokens(textRequest *relaymodel.GeneralOpenAIRequest, relayMode int) int {
switch relayMode {
case constant.RelayModeChatCompletions:
return openai.CountTokenMessages(textRequest.Messages, textRequest.Model)
@@ -48,7 +48,7 @@ func getPromptTokens(textRequest *openai.GeneralOpenAIRequest, relayMode int) in
return 0
}
func getPreConsumedQuota(textRequest *openai.GeneralOpenAIRequest, promptTokens int, ratio float64) int {
func getPreConsumedQuota(textRequest *relaymodel.GeneralOpenAIRequest, promptTokens int, ratio float64) int {
preConsumedTokens := config.PreConsumedQuota
if textRequest.MaxTokens != 0 {
preConsumedTokens = promptTokens + textRequest.MaxTokens
@@ -56,7 +56,7 @@ func getPreConsumedQuota(textRequest *openai.GeneralOpenAIRequest, promptTokens
return int(float64(preConsumedTokens) * ratio)
}
func preConsumeQuota(ctx context.Context, textRequest *openai.GeneralOpenAIRequest, promptTokens int, ratio float64, meta *util.RelayMeta) (int, *openai.ErrorWithStatusCode) {
func preConsumeQuota(ctx context.Context, textRequest *relaymodel.GeneralOpenAIRequest, promptTokens int, ratio float64, meta *util.RelayMeta) (int, *relaymodel.ErrorWithStatusCode) {
preConsumedQuota := getPreConsumedQuota(textRequest, promptTokens, ratio)
userQuota, err := model.CacheGetUserQuota(meta.UserId)
@@ -85,7 +85,7 @@ func preConsumeQuota(ctx context.Context, textRequest *openai.GeneralOpenAIReque
return preConsumedQuota, nil
}
func postConsumeQuota(ctx context.Context, usage *openai.Usage, meta *util.RelayMeta, textRequest *openai.GeneralOpenAIRequest, ratio float64, preConsumedQuota int, modelRatio float64, groupRatio float64) {
func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *util.RelayMeta, textRequest *relaymodel.GeneralOpenAIRequest, ratio float64, preConsumedQuota int, modelRatio float64, groupRatio float64) {
if usage == nil {
logger.Error(ctx, "usage is nil, which is unexpected")
return
@@ -120,27 +120,3 @@ func postConsumeQuota(ctx context.Context, usage *openai.Usage, meta *util.Relay
model.UpdateChannelUsedQuota(meta.ChannelId, quota)
}
}
func doRequest(ctx context.Context, c *gin.Context, meta *util.RelayMeta, isStream bool, fullRequestURL string, requestBody io.Reader) (*http.Response, error) {
req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return nil, err
}
SetupRequestHeaders(c, req, meta, isStream)
resp, err := util.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if resp == nil {
return nil, errors.New("resp is nil")
}
err = req.Body.Close()
if err != nil {
logger.Warnf(ctx, "close req.Body failed: %+v", err)
}
err = c.Request.Body.Close()
if err != nil {
logger.Warnf(ctx, "close c.Request.Body failed: %+v", err)
}
return resp, nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
@@ -28,7 +29,7 @@ func isWithinRange(element string, value int) bool {
return value >= min && value <= max
}
func RelayImageHelper(c *gin.Context, relayMode int) *openai.ErrorWithStatusCode {
func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
imageModel := "dall-e-2"
imageSize := "1024x1024"

View File

@@ -1,333 +0,0 @@
package controller
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/relay/channel/aiproxy"
"github.com/songquanpeng/one-api/relay/channel/anthropic"
"github.com/songquanpeng/one-api/relay/channel/google"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/util"
)
func GetRequestURL(requestURL string, meta *util.RelayMeta, textRequest *openai.GeneralOpenAIRequest) (string, error) {
fullRequestURL := util.GetFullRequestURL(meta.BaseURL, requestURL, meta.ChannelType)
switch meta.APIType {
case constant.APITypeOpenAI:
if meta.ChannelType == common.ChannelTypeAzure {
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
requestURL := strings.Split(requestURL, "?")[0]
requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.APIVersion)
task := strings.TrimPrefix(requestURL, "/v1/")
model_ := textRequest.Model
model_ = strings.Replace(model_, ".", "", -1)
// https://github.com/songquanpeng/one-api/issues/67
model_ = strings.TrimSuffix(model_, "-0301")
model_ = strings.TrimSuffix(model_, "-0314")
model_ = strings.TrimSuffix(model_, "-0613")
requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
fullRequestURL = util.GetFullRequestURL(meta.BaseURL, requestURL, meta.ChannelType)
}
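// Worked example (hypothetical values): an incoming path "/v1/chat/completions?x=1"
// with model "gpt-3.5-turbo-0613" and api-version "2023-05-15" becomes
// "/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15":
// the original query string is replaced by api-version, the "/v1/" prefix is
// dropped, dots are stripped from the model name, and the date suffix trimmed.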
case constant.APITypeClaude:
fullRequestURL = fmt.Sprintf("%s/v1/complete", meta.BaseURL)
// case constant.APITypeBaidu:
// switch textRequest.Model {
// case "ERNIE-Bot":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
// case "ERNIE-Bot-turbo":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
// case "ERNIE-Bot-4":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
// case "BLOOMZ-7B":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1"
// case "Embedding-V1":
// fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"
// }
// var accessToken string
// var err error
// if accessToken, err = baidu.GetAccessToken(meta.APIKey); err != nil {
// return "", fmt.Errorf("failed to get baidu access token: %w", err)
// }
// fullRequestURL += "?access_token=" + accessToken
case constant.APITypePaLM:
fullRequestURL = fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", meta.BaseURL)
case constant.APITypeGemini:
version := helper.AssignOrDefault(meta.APIVersion, "v1")
action := "generateContent"
if textRequest.Stream {
action = "streamGenerateContent"
}
fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", meta.BaseURL, version, textRequest.Model, action)
case constant.APITypeZhipu:
method := "invoke"
if textRequest.Stream {
method = "sse-invoke"
}
fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
case constant.APITypeAli:
fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
if meta.Mode == constant.RelayModeEmbeddings {
fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
}
// case constant.APITypeTencent:
// fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
case constant.APITypeAIProxyLibrary:
fullRequestURL = fmt.Sprintf("%s/api/library/ask", meta.BaseURL)
}
return fullRequestURL, nil
}
func GetRequestBody(c *gin.Context, textRequest openai.GeneralOpenAIRequest, isModelMapped bool, apiType int, relayMode int) (io.Reader, error) {
var requestBody io.Reader
if isModelMapped {
jsonStr, err := json.Marshal(textRequest)
if err != nil {
return nil, err
}
requestBody = bytes.NewBuffer(jsonStr)
} else {
requestBody = c.Request.Body
}
switch apiType {
case constant.APITypeClaude:
claudeRequest := anthropic.ConvertRequest(textRequest)
jsonStr, err := json.Marshal(claudeRequest)
if err != nil {
return nil, err
}
requestBody = bytes.NewBuffer(jsonStr)
// case constant.APITypeBaidu:
// var jsonData []byte
// var err error
// switch relayMode {
// case constant.RelayModeEmbeddings:
// baiduEmbeddingRequest := baidu.ConvertEmbeddingRequest(textRequest)
// jsonData, err = json.Marshal(baiduEmbeddingRequest)
// default:
// baiduRequest := baidu.ConvertRequest(textRequest)
// jsonData, err = json.Marshal(baiduRequest)
// }
// if err != nil {
// return nil, err
// }
// requestBody = bytes.NewBuffer(jsonData)
case constant.APITypePaLM:
palmRequest := google.ConvertPaLMRequest(textRequest)
jsonStr, err := json.Marshal(palmRequest)
if err != nil {
return nil, err
}
requestBody = bytes.NewBuffer(jsonStr)
case constant.APITypeGemini:
geminiChatRequest := google.ConvertGeminiRequest(textRequest)
jsonStr, err := json.Marshal(geminiChatRequest)
if err != nil {
return nil, err
}
requestBody = bytes.NewBuffer(jsonStr)
// case constant.APITypeZhipu:
// zhipuRequest := zhipu.ConvertRequest(textRequest)
// jsonStr, err := json.Marshal(zhipuRequest)
// if err != nil {
// return nil, err
// }
// requestBody = bytes.NewBuffer(jsonStr)
// case constant.APITypeAli:
// var jsonStr []byte
// var err error
// switch relayMode {
// case constant.RelayModeEmbeddings:
// aliEmbeddingRequest := ali.ConvertEmbeddingRequest(textRequest)
// jsonStr, err = json.Marshal(aliEmbeddingRequest)
// default:
// aliRequest := ali.ConvertRequest(textRequest)
// jsonStr, err = json.Marshal(aliRequest)
// }
// if err != nil {
// return nil, err
// }
// requestBody = bytes.NewBuffer(jsonStr)
// case constant.APITypeTencent:
// apiKey := c.Request.Header.Get("Authorization")
// apiKey = strings.TrimPrefix(apiKey, "Bearer ")
// appId, secretId, secretKey, err := tencent.ParseConfig(apiKey)
// if err != nil {
// return nil, err
// }
// tencentRequest := tencent.ConvertRequest(textRequest)
// tencentRequest.AppId = appId
// tencentRequest.SecretId = secretId
// jsonStr, err := json.Marshal(tencentRequest)
// if err != nil {
// return nil, err
// }
// sign := tencent.GetSign(*tencentRequest, secretKey)
// c.Request.Header.Set("Authorization", sign)
// requestBody = bytes.NewBuffer(jsonStr)
case constant.APITypeAIProxyLibrary:
aiProxyLibraryRequest := aiproxy.ConvertRequest(textRequest)
aiProxyLibraryRequest.LibraryId = c.GetString("library_id")
jsonStr, err := json.Marshal(aiProxyLibraryRequest)
if err != nil {
return nil, err
}
requestBody = bytes.NewBuffer(jsonStr)
}
return requestBody, nil
}
func SetupRequestHeaders(c *gin.Context, req *http.Request, meta *util.RelayMeta, isStream bool) {
SetupAuthHeaders(c, req, meta, isStream)
req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
req.Header.Set("Accept", c.Request.Header.Get("Accept"))
if isStream && c.Request.Header.Get("Accept") == "" {
req.Header.Set("Accept", "text/event-stream")
}
}
func SetupAuthHeaders(c *gin.Context, req *http.Request, meta *util.RelayMeta, isStream bool) {
apiKey := meta.APIKey
switch meta.APIType {
case constant.APITypeOpenAI:
if meta.ChannelType == common.ChannelTypeAzure {
req.Header.Set("api-key", apiKey)
} else {
req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
if meta.ChannelType == common.ChannelTypeOpenRouter {
req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
req.Header.Set("X-Title", "One API")
}
}
case constant.APITypeClaude:
req.Header.Set("x-api-key", apiKey)
anthropicVersion := c.Request.Header.Get("anthropic-version")
if anthropicVersion == "" {
anthropicVersion = "2023-06-01"
}
req.Header.Set("anthropic-version", anthropicVersion)
// case constant.APITypeZhipu:
// token := zhipu.GetToken(apiKey)
// req.Header.Set("Authorization", token)
// case constant.APITypeAli:
// req.Header.Set("Authorization", "Bearer "+apiKey)
// if isStream {
// req.Header.Set("X-DashScope-SSE", "enable")
// }
// if c.GetString("plugin") != "" {
// req.Header.Set("X-DashScope-Plugin", c.GetString("plugin"))
// }
// case constant.APITypeTencent:
// req.Header.Set("Authorization", apiKey)
case constant.APITypePaLM:
req.Header.Set("x-goog-api-key", apiKey)
case constant.APITypeGemini:
req.Header.Set("x-goog-api-key", apiKey)
default:
req.Header.Set("Authorization", "Bearer "+apiKey)
}
}
func DoResponse(c *gin.Context, textRequest *openai.GeneralOpenAIRequest, resp *http.Response, relayMode int, apiType int, isStream bool, promptTokens int) (usage *openai.Usage, err *openai.ErrorWithStatusCode) {
var responseText string
switch apiType {
case constant.APITypeOpenAI:
if isStream {
err, responseText = openai.StreamHandler(c, resp, relayMode)
} else {
err, usage = openai.Handler(c, resp, promptTokens, textRequest.Model)
}
case constant.APITypeClaude:
if isStream {
err, responseText = anthropic.StreamHandler(c, resp)
} else {
err, usage = anthropic.Handler(c, resp, promptTokens, textRequest.Model)
}
// case constant.APITypeBaidu:
// if isStream {
// err, usage = baidu.StreamHandler(c, resp)
// } else {
// switch relayMode {
// case constant.RelayModeEmbeddings:
// err, usage = baidu.EmbeddingHandler(c, resp)
// default:
// err, usage = baidu.Handler(c, resp)
// }
// }
case constant.APITypePaLM:
if isStream { // PaLM2 API does not support stream
err, responseText = google.PaLMStreamHandler(c, resp)
} else {
err, usage = google.PaLMHandler(c, resp, promptTokens, textRequest.Model)
}
case constant.APITypeGemini:
if isStream {
err, responseText = google.StreamHandler(c, resp)
} else {
err, usage = google.GeminiHandler(c, resp, promptTokens, textRequest.Model)
}
// case constant.APITypeZhipu:
// if isStream {
// err, usage = zhipu.StreamHandler(c, resp)
// } else {
// err, usage = zhipu.Handler(c, resp)
// }
// case constant.APITypeAli:
// if isStream {
// err, usage = ali.StreamHandler(c, resp)
// } else {
// switch relayMode {
// case constant.RelayModeEmbeddings:
// err, usage = ali.EmbeddingHandler(c, resp)
// default:
// err, usage = ali.Handler(c, resp)
// }
// }
// case constant.APITypeXunfei:
// auth := c.Request.Header.Get("Authorization")
// auth = strings.TrimPrefix(auth, "Bearer ")
// splits := strings.Split(auth, "|")
// if len(splits) != 3 {
// return nil, openai.ErrorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest)
// }
// if isStream {
// err, usage = xunfei.StreamHandler(c, *textRequest, splits[0], splits[1], splits[2])
// } else {
// err, usage = xunfei.Handler(c, *textRequest, splits[0], splits[1], splits[2])
// }
case constant.APITypeAIProxyLibrary:
if isStream {
err, usage = aiproxy.StreamHandler(c, resp)
} else {
err, usage = aiproxy.Handler(c, resp)
}
// case constant.APITypeTencent:
// if isStream {
// err, responseText = tencent.StreamHandler(c, resp)
// } else {
// err, usage = tencent.Handler(c, resp)
// }
default:
return nil, openai.ErrorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
}
if err != nil {
return nil, err
}
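// Streaming handlers only return the concatenated response text, so when the
// upstream reported no usage it is reconstructed below by re-counting tokens
// over that text.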
if usage == nil && responseText != "" {
usage = &openai.Usage{}
usage.PromptTokens = promptTokens
usage.CompletionTokens = openai.CountTokenText(responseText, textRequest.Model)
usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
}
return usage, nil
}

View File

@@ -1,18 +1,23 @@
package controller
import (
"bytes"
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/helper"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/util"
"io"
"net/http"
"strings"
)
func RelayTextHelper(c *gin.Context) *openai.ErrorWithStatusCode {
func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
ctx := c.Request.Context()
meta := util.GetRelayMeta(c)
// get & validate textRequest
@@ -21,9 +26,13 @@ func RelayTextHelper(c *gin.Context) *openai.ErrorWithStatusCode {
logger.Errorf(ctx, "getAndValidateTextRequest failed: %s", err.Error())
return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest)
}
meta.IsStream = textRequest.Stream
// map model name
var isModelMapped bool
meta.OriginModelName = textRequest.Model
textRequest.Model, isModelMapped = util.GetMappedModelName(textRequest.Model, meta.ModelMapping)
meta.ActualModelName = textRequest.Model
// get model ratio & group ratio
modelRatio := common.GetModelRatio(textRequest.Model)
groupRatio := common.GetGroupRatio(meta.Group)
@@ -36,35 +45,50 @@ func RelayTextHelper(c *gin.Context) *openai.ErrorWithStatusCode {
return bizErr
}
adaptor := helper.GetAdaptor(meta.APIType)
if adaptor == nil {
return openai.ErrorWrapper(fmt.Errorf("invalid api type: %d", meta.APIType), "invalid_api_type", http.StatusBadRequest)
}
// get request body
requestBody, err := GetRequestBody(c, *textRequest, isModelMapped, meta.APIType, meta.Mode)
if err != nil {
return openai.ErrorWrapper(err, "get_request_body_failed", http.StatusInternalServerError)
var requestBody io.Reader
if meta.APIType == constant.APITypeOpenAI {
// no need to convert request for openai
if isModelMapped {
jsonStr, err := json.Marshal(textRequest)
if err != nil {
return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
} else {
requestBody = c.Request.Body
}
} else {
convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
if err != nil {
return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
}
jsonData, err := json.Marshal(convertedRequest)
if err != nil {
return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonData)
}
// do request
var resp *http.Response
isStream := textRequest.Stream
if meta.APIType != constant.APITypeXunfei { // because Xunfei uses WebSocket
fullRequestURL, err := GetRequestURL(c.Request.URL.String(), meta, textRequest)
if err != nil {
logger.Error(ctx, fmt.Sprintf("util.GetRequestURL failed: %s", err.Error()))
return openai.ErrorWrapper(fmt.Errorf("util.GetRequestURL failed"), "get_request_url_failed", http.StatusInternalServerError)
}
resp, err = doRequest(ctx, c, meta, isStream, fullRequestURL, requestBody)
if err != nil {
logger.Errorf(ctx, "doRequest failed: %s", err.Error())
return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}
isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
if resp.StatusCode != http.StatusOK {
util.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
return util.RelayErrorHandler(resp)
}
resp, err := adaptor.DoRequest(c, meta, requestBody)
if err != nil {
logger.Errorf(ctx, "DoRequest failed: %s", err.Error())
return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}
meta.IsStream = meta.IsStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
if resp.StatusCode != http.StatusOK {
util.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
return util.RelayErrorHandler(resp)
}
// do response
usage, respErr := DoResponse(c, textRequest, resp, meta.Mode, meta.APIType, isStream, promptTokens)
usage, respErr := adaptor.DoResponse(c, resp, meta)
if respErr != nil {
logger.Errorf(ctx, "respErr is not nil: %+v", respErr)
util.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)

37
relay/helper/main.go Normal file
View File

@@ -0,0 +1,37 @@
package helper
import (
"github.com/songquanpeng/one-api/relay/channel"
"github.com/songquanpeng/one-api/relay/channel/aiproxy"
"github.com/songquanpeng/one-api/relay/channel/anthropic"
"github.com/songquanpeng/one-api/relay/channel/gemini"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/channel/palm"
"github.com/songquanpeng/one-api/relay/constant"
)
func GetAdaptor(apiType int) channel.Adaptor {
switch apiType {
case constant.APITypeAIProxyLibrary:
return &aiproxy.Adaptor{}
// case constant.APITypeAli:
// return &ali.Adaptor{}
case constant.APITypeAnthropic:
return &anthropic.Adaptor{}
// case constant.APITypeBaidu:
// return &baidu.Adaptor{}
case constant.APITypeGemini:
return &gemini.Adaptor{}
case constant.APITypeOpenAI:
return &openai.Adaptor{}
case constant.APITypePaLM:
return &palm.Adaptor{}
// case constant.APITypeTencent:
// return &tencent.Adaptor{}
// case constant.APITypeXunfei:
// return &xunfei.Adaptor{}
// case constant.APITypeZhipu:
// return &zhipu.Adaptor{}
}
return nil
}
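The channel.Adaptor interface returned here is not shown in this diff; the following is a plausible sketch inferred from the method set of the commented-out zhipu adaptor earlier in this commit, so the exact signatures are assumptions:

package channel

import (
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

// Adaptor is the assumed shape of relay/channel.Adaptor, reconstructed from
// the adaptor methods exercised elsewhere in this commit.
type Adaptor interface {
	Init(meta *util.RelayMeta)
	GetRequestURL(meta *util.RelayMeta) (string, error)
	SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io.Reader) (*http.Response, error)
	DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (*model.Usage, *model.ErrorWithStatusCode)
	GetModelList() []string
	GetChannelName() string
}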

View File

@@ -1,4 +1,4 @@
package openai
package model
const (
ContentTypeText = "text"

46
relay/model/general.go Normal file
View File

@@ -0,0 +1,46 @@
package model
type ResponseFormat struct {
Type string `json:"type,omitempty"`
}
type GeneralOpenAIRequest struct {
Model string `json:"model,omitempty"`
Messages []Message `json:"messages,omitempty"`
Prompt any `json:"prompt,omitempty"`
Stream bool `json:"stream,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
N int `json:"n,omitempty"`
Input any `json:"input,omitempty"`
Instruction string `json:"instruction,omitempty"`
Size string `json:"size,omitempty"`
Functions any `json:"functions,omitempty"`
FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
PresencePenalty float64 `json:"presence_penalty,omitempty"`
ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
Seed float64 `json:"seed,omitempty"`
Tools any `json:"tools,omitempty"`
ToolChoice any `json:"tool_choice,omitempty"`
User string `json:"user,omitempty"`
}
func (r GeneralOpenAIRequest) ParseInput() []string {
if r.Input == nil {
return nil
}
var input []string
switch r.Input.(type) {
case string:
input = []string{r.Input.(string)}
case []any:
input = make([]string, 0, len(r.Input.([]any)))
for _, item := range r.Input.([]any) {
if str, ok := item.(string); ok {
input = append(input, str)
}
}
}
return input
}
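A quick illustration of ParseInput's behavior (hypothetical input values): a bare string becomes a one-element slice, and non-string items in a mixed list are silently dropped.

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/model"
)

func main() {
	single := model.GeneralOpenAIRequest{Input: "hello"}
	mixed := model.GeneralOpenAIRequest{Input: []any{"a", "b", 3}}
	fmt.Println(single.ParseInput()) // [hello]
	fmt.Println(mixed.ParseInput())  // [a b] (the non-string 3 is skipped)
}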

88
relay/model/message.go Normal file
View File

@@ -0,0 +1,88 @@
package model
type Message struct {
Role string `json:"role"`
Content any `json:"content"`
Name *string `json:"name,omitempty"`
}
func (m Message) IsStringContent() bool {
_, ok := m.Content.(string)
return ok
}
func (m Message) StringContent() string {
content, ok := m.Content.(string)
if ok {
return content
}
contentList, ok := m.Content.([]any)
if ok {
var contentStr string
for _, contentItem := range contentList {
contentMap, ok := contentItem.(map[string]any)
if !ok {
continue
}
if contentMap["type"] == ContentTypeText {
if subStr, ok := contentMap["text"].(string); ok {
contentStr += subStr
}
}
}
return contentStr
}
return ""
}
func (m Message) ParseContent() []MessageContent {
var contentList []MessageContent
content, ok := m.Content.(string)
if ok {
contentList = append(contentList, MessageContent{
Type: ContentTypeText,
Text: content,
})
return contentList
}
anyList, ok := m.Content.([]any)
if ok {
for _, contentItem := range anyList {
contentMap, ok := contentItem.(map[string]any)
if !ok {
continue
}
switch contentMap["type"] {
case ContentTypeText:
if subStr, ok := contentMap["text"].(string); ok {
contentList = append(contentList, MessageContent{
Type: ContentTypeText,
Text: subStr,
})
}
case ContentTypeImageURL:
if subObj, ok := contentMap["image_url"].(map[string]any); ok {
contentList = append(contentList, MessageContent{
Type: ContentTypeImageURL,
ImageURL: &ImageURL{
Url: subObj["url"].(string),
},
})
}
}
}
return contentList
}
return nil
}
type ImageURL struct {
Url string `json:"url,omitempty"`
Detail string `json:"detail,omitempty"`
}
type MessageContent struct {
Type string `json:"type,omitempty"`
Text string `json:"text"`
ImageURL *ImageURL `json:"image_url,omitempty"`
}
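To illustrate how these helpers treat a vision-style message (hypothetical content; ContentTypeImageURL is assumed to equal "image_url", mirroring ContentTypeText above):

package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/model"
)

func main() {
	// Content shaped the way json.Unmarshal leaves it in the `any` field.
	msg := model.Message{
		Role: "user",
		Content: []any{
			map[string]any{"type": "text", "text": "What is in this image?"},
			map[string]any{"type": "image_url", "image_url": map[string]any{"url": "https://example.com/cat.png"}},
		},
	}
	fmt.Println(msg.IsStringContent())   // false
	fmt.Println(msg.StringContent())     // What is in this image?  (text parts only)
	fmt.Println(len(msg.ParseContent())) // 2 (one text part, one image part)
}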

19
relay/model/misc.go Normal file
View File

@@ -0,0 +1,19 @@
package model
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}
type Error struct {
Message string `json:"message"`
Type string `json:"type"`
Param string `json:"param"`
Code any `json:"code"`
}
type ErrorWithStatusCode struct {
Error
StatusCode int `json:"status_code"`
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/model"
"github.com/songquanpeng/one-api/relay/channel/openai"
relaymodel "github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strconv"
@@ -17,7 +17,7 @@ import (
"github.com/gin-gonic/gin"
)
func ShouldDisableChannel(err *openai.Error, statusCode int) bool {
func ShouldDisableChannel(err *relaymodel.Error, statusCode int) bool {
if !config.AutomaticDisableChannelEnabled {
return false
}
@@ -33,7 +33,7 @@ func ShouldDisableChannel(err *openai.Error, statusCode int) bool {
return false
}
func ShouldEnableChannel(err error, openAIErr *openai.Error) bool {
func ShouldEnableChannel(err error, openAIErr *relaymodel.Error) bool {
if !config.AutomaticEnableChannelEnabled {
return false
}
@@ -47,11 +47,11 @@ func ShouldEnableChannel(err error, openAIErr *openai.Error) bool {
}
type GeneralErrorResponse struct {
Error openai.Error `json:"error"`
Message string `json:"message"`
Msg string `json:"msg"`
Err string `json:"err"`
ErrorMsg string `json:"error_msg"`
Error relaymodel.Error `json:"error"`
Message string `json:"message"`
Msg string `json:"msg"`
Err string `json:"err"`
ErrorMsg string `json:"error_msg"`
Header struct {
Message string `json:"message"`
} `json:"header"`
@@ -87,10 +87,10 @@ func (e GeneralErrorResponse) ToMessage() string {
return ""
}
func RelayErrorHandler(resp *http.Response) (ErrorWithStatusCode *openai.ErrorWithStatusCode) {
ErrorWithStatusCode = &openai.ErrorWithStatusCode{
func RelayErrorHandler(resp *http.Response) (ErrorWithStatusCode *relaymodel.ErrorWithStatusCode) {
ErrorWithStatusCode = &relaymodel.ErrorWithStatusCode{
StatusCode: resp.StatusCode,
Error: openai.Error{
Error: relaymodel.Error{
Message: "",
Type: "upstream_error",
Code: "bad_response_status_code",
@@ -162,7 +162,7 @@ func GetAzureAPIVersion(c *gin.Context) string {
query := c.Request.URL.Query()
apiVersion := query.Get("api-version")
if apiVersion == "" {
apiVersion = c.GetString("api_version")
apiVersion = c.GetString(common.ConfigKeyAPIVersion)
}
return apiVersion
}

View File

@@ -8,35 +8,41 @@ import (
)
type RelayMeta struct {
Mode int
ChannelType int
ChannelId int
TokenId int
TokenName string
UserId int
Group string
ModelMapping map[string]string
BaseURL string
APIVersion string
APIKey string
APIType int
Config map[string]string
Mode int
ChannelType int
ChannelId int
TokenId int
TokenName string
UserId int
Group string
ModelMapping map[string]string
BaseURL string
APIVersion string
APIKey string
APIType int
Config map[string]string
IsStream bool
OriginModelName string
ActualModelName string
RequestURLPath string
PromptTokens int // only for DoResponse
}
func GetRelayMeta(c *gin.Context) *RelayMeta {
meta := RelayMeta{
Mode: constant.Path2RelayMode(c.Request.URL.Path),
ChannelType: c.GetInt("channel"),
ChannelId: c.GetInt("channel_id"),
TokenId: c.GetInt("token_id"),
TokenName: c.GetString("token_name"),
UserId: c.GetInt("id"),
Group: c.GetString("group"),
ModelMapping: c.GetStringMapString("model_mapping"),
BaseURL: c.GetString("base_url"),
APIVersion: c.GetString("api_version"),
APIKey: strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
Config: nil,
Mode: constant.Path2RelayMode(c.Request.URL.Path),
ChannelType: c.GetInt("channel"),
ChannelId: c.GetInt("channel_id"),
TokenId: c.GetInt("token_id"),
TokenName: c.GetString("token_name"),
UserId: c.GetInt("id"),
Group: c.GetString("group"),
ModelMapping: c.GetStringMapString("model_mapping"),
BaseURL: c.GetString("base_url"),
APIVersion: c.GetString(common.ConfigKeyAPIVersion),
APIKey: strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
Config: nil,
RequestURLPath: c.Request.URL.String(),
}
if meta.ChannelType == common.ChannelTypeAzure {
meta.APIVersion = GetAzureAPIVersion(c)

View File

@@ -2,12 +2,12 @@ package util
import (
"errors"
"github.com/songquanpeng/one-api/relay/channel/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"math"
)
func ValidateTextRequest(textRequest *openai.GeneralOpenAIRequest, relayMode int) error {
func ValidateTextRequest(textRequest *model.GeneralOpenAIRequest, relayMode int) error {
if textRequest.MaxTokens < 0 || textRequest.MaxTokens > math.MaxInt32/2 {
return errors.New("max_tokens is invalid")
}

View File

@@ -59,6 +59,12 @@ export const CHANNEL_OPTIONS = {
value: 19,
color: 'default'
},
25: {
key: 25,
text: 'Moonshot AI',
value: 25,
color: 'default'
},
23: {
key: 23,
text: 'Tencent Hunyuan',

View File

@@ -202,9 +202,7 @@ export default function ChannelPage() {
</Stack>
<Stack mb={5}>
<Alert severity="info">
Channel testing in the current version is implemented as a non-streaming request in the OpenAI API format using the
gpt-3.5-turbo model, so a test failure does not necessarily mean the channel is unavailable; this will be fixed in a later release. Also, OpenAI channels no longer support
querying the balance by key, so the balance is shown as 0; for supported channel types, click the balance to refresh it.
OpenAI channels no longer support querying the balance by key, so the balance is shown as 0; for supported channel types, click the balance to refresh it.
</Alert>
</Stack>
<Card>
@@ -229,9 +227,9 @@ export default function ChannelPage() {
<Button onClick={testAllChannels} startIcon={<IconBrandSpeedtest width={'18px'} />}>
Test enabled channels
</Button>
<Button onClick={updateAllChannelsBalance} startIcon={<IconCoinYuan width={'18px'} />}>
Update balances of enabled channels
</Button>
{/*<Button onClick={updateAllChannelsBalance} startIcon={<IconCoinYuan width={'18px'} />}>*/}
{/* Update balances of enabled channels*/}
{/*</Button>*/}
<Button onClick={deleteAllDisabledChannels} startIcon={<IconHttpDelete width={'18px'} />}>
Delete disabled channels
</Button>

View File

@@ -94,7 +94,13 @@ const typeConfig = {
other: "版本号",
},
input: {
models: ["SparkDesk"],
models: [
"SparkDesk",
'SparkDesk-v1.1',
'SparkDesk-v2.1',
'SparkDesk-v3.1',
'SparkDesk-v3.5'
],
},
prompt: {
key: "按照如下格式输入APPID|APISecret|APIKey",

View File

@@ -322,10 +322,7 @@ const ChannelsTable = () => {
setShowPrompt(false);
setPromptShown("channel-test");
}}>
Channel testing in the current version is implemented as a non-streaming request in the OpenAI API format using the
gpt-3.5-turbo model, so a test failure does not necessarily mean the channel is unavailable; this will be fixed in a later release.
Also, OpenAI channels no longer support querying the balance by key, so the balance is shown as 0; for supported channel types, click the balance to refresh it.
OpenAI channels no longer support querying the balance by key, so the balance is shown as 0; for supported channel types, click the balance to refresh it.
</Message>
)
}
@@ -525,8 +522,8 @@ const ChannelsTable = () => {
<Button size='small' loading={loading} onClick={testAllChannels}>
Test all channels
</Button>
<Button size='small' onClick={updateAllChannelsBalance}
loading={loading || updatingBalance}>Update balances of enabled channels</Button>
{/*<Button size='small' onClick={updateAllChannelsBalance}*/}
{/* loading={loading || updatingBalance}>Update balances of enabled channels</Button>*/}
<Popup
trigger={
<Button size='small' loading={loading}>

View File

@@ -9,6 +9,7 @@ export const CHANNEL_OPTIONS = [
{ key: 18, text: 'iFlytek Spark', value: 18, color: 'blue' },
{ key: 16, text: 'Zhipu ChatGLM', value: 16, color: 'violet' },
{ key: 19, text: '360 Zhinao', value: 19, color: 'blue' },
{ key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
{ key: 23, text: 'Tencent Hunyuan', value: 23, color: 'teal' },
{ key: 8, text: 'Custom channel', value: 8, color: 'pink' },
{ key: 22, text: 'Knowledge base: FastGPT', value: 22, color: 'blue' },

View File

@@ -82,7 +82,13 @@ const EditChannel = () => {
localModels = ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];
break;
case 18:
localModels = ['SparkDesk'];
localModels = [
'SparkDesk',
'SparkDesk-v1.1',
'SparkDesk-v2.1',
'SparkDesk-v3.1',
'SparkDesk-v3.5'
];
break;
case 19:
localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];
@@ -93,6 +99,9 @@ const EditChannel = () => {
case 24:
localModels = ['gemini-pro', 'gemini-pro-vision'];
break;
case 25:
localModels = ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'];
break;
}
setInputs((inputs) => ({ ...inputs, models: localModels }));
}