mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-10 02:23:43 +08:00)

Merge remote-tracking branch 'origin/upstream/main'
relay/adaptor/aws/adaptor.go
@@ -4,14 +4,13 @@ import (
 	"io"
 	"net/http"
 
+	"github.com/Laisky/errors/v2"
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/credentials"
 	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
 	"github.com/gin-gonic/gin"
-	"github.com/pkg/errors"
-	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/relay/adaptor"
-	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
 )
@@ -19,18 +18,52 @@ import (
 var _ adaptor.Adaptor = new(Adaptor)
 
 type Adaptor struct {
-	meta      *meta.Meta
-	awsClient *bedrockruntime.Client
+	awsAdapter utils.AwsAdapter
+
+	Meta      *meta.Meta
+	AwsClient *bedrockruntime.Client
 }
 
 func (a *Adaptor) Init(meta *meta.Meta) {
-	a.meta = meta
-	a.awsClient = bedrockruntime.New(bedrockruntime.Options{
+	a.Meta = meta
+	a.AwsClient = bedrockruntime.New(bedrockruntime.Options{
 		Region:      meta.Config.Region,
 		Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(meta.Config.AK, meta.Config.SK, "")),
 	})
 }
 
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+
+	adaptor := GetAdaptor(request.Model)
+	if adaptor == nil {
+		return nil, errors.New("adaptor not found")
+	}
+
+	a.awsAdapter = adaptor
+	return adaptor.ConvertRequest(c, relayMode, request)
+}
+
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if a.awsAdapter == nil {
+		return nil, utils.WrapErr(errors.New("awsAdapter is nil"))
+	}
+	return a.awsAdapter.DoResponse(c, a.AwsClient, meta)
+}
+
+func (a *Adaptor) GetModelList() (models []string) {
+	for model := range adaptors {
+		models = append(models, model)
+	}
+	return
+}
+
+func (a *Adaptor) GetChannelName() string {
+	return "aws"
+}
+
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	return "", nil
 }
@@ -39,17 +72,6 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
-	if request == nil {
-		return nil, errors.New("request is nil")
-	}
-
-	claudeReq := anthropic.ConvertRequest(*request)
-	c.Set(ctxkey.RequestModel, request.Model)
-	c.Set(ctxkey.ConvertedRequest, claudeReq)
-	return claudeReq, nil
-}
-
 func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
@@ -60,23 +82,3 @@ func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error)
 func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
 	return nil, nil
 }
-
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
-	if meta.IsStream {
-		err, usage = StreamHandler(c, a.awsClient)
-	} else {
-		err, usage = Handler(c, a.awsClient, meta.ActualModelName)
-	}
-	return
-}
-
-func (a *Adaptor) GetModelList() (models []string) {
-	for n := range awsModelIDMap {
-		models = append(models, n)
-	}
-	return
-}
-
-func (a *Adaptor) GetChannelName() string {
-	return "aws"
-}
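The net effect of this file's changes is that the top-level AWS adaptor no longer talks to Anthropic directly; it routes by model name and delegates to a sub-adaptor. A minimal sketch of the resulting flow, assuming a *gin.Context, relay meta, and request already prepared by the relay pipeline (the wrapper function itself is hypothetical, not part of the diff):

```go
package relaydemo

import (
	"github.com/gin-gonic/gin"
	aws "github.com/songquanpeng/one-api/relay/adaptor/aws"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

// relayThroughBedrock is a hypothetical wrapper; c, m, and req are assumed
// to be prepared by the relay pipeline.
func relayThroughBedrock(c *gin.Context, m *meta.Meta, req *model.GeneralOpenAIRequest) (*model.Usage, *model.ErrorWithStatusCode) {
	a := new(aws.Adaptor)
	a.Init(m) // builds the Bedrock client from the channel's region and AK/SK

	// Selects the claude or llama3 sub-adaptor by model name and caches it.
	if _, err := a.ConvertRequest(c, relaymode.ChatCompletions, req); err != nil {
		return nil, utils.WrapErr(err)
	}

	// The *http.Response argument is unused here: the AWS SDK performs the
	// network call inside DoResponse.
	return a.DoResponse(c, nil, m)
}
```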
relay/adaptor/aws/claude/adapter.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package aws

import (
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

var _ utils.AwsAdapter = new(Adaptor)

type Adaptor struct {
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

	claudeReq := anthropic.ConvertRequest(*request)
	c.Set(ctxkey.RequestModel, request.Model)
	c.Set(ctxkey.ConvertedRequest, claudeReq)
	return claudeReq, nil
}

func (a *Adaptor) DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		err, usage = StreamHandler(c, awsCli)
	} else {
		err, usage = Handler(c, awsCli, meta.ActualModelName)
	}
	return
}
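The `var _ utils.AwsAdapter = new(Adaptor)` line is a compile-time interface assertion: the file stops building if Adaptor ever drifts from the AwsAdapter contract, rather than failing at a distant call site. The same idiom in isolation, with hypothetical names:

```go
package main

type Greeter interface{ Greet() string }

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

// Compile-time check: removing or changing Greet breaks the build right here.
var _ Greeter = englishGreeter{}

func main() {}
```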
relay/adaptor/aws/claude/main.go
@@ -19,21 +19,13 @@ import (
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	relaymodel "github.com/songquanpeng/one-api/relay/model"
 )
 
-func wrapErr(err error) *relaymodel.ErrorWithStatusCode {
-	return &relaymodel.ErrorWithStatusCode{
-		StatusCode: http.StatusInternalServerError,
-		Error: relaymodel.Error{
-			Message: fmt.Sprintf("%s", err.Error()),
-		},
-	}
-}
-
 // https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
-var awsModelIDMap = map[string]string{
+var AwsModelIDMap = map[string]string{
 	"claude-instant-1.2": "anthropic.claude-instant-v1",
 	"claude-2.0":         "anthropic.claude-v2",
 	"claude-2.1":         "anthropic.claude-v2:1",
@@ -44,7 +36,7 @@ var awsModelIDMap = map[string]string{
 }
 
 func awsModelID(requestModel string) (string, error) {
-	if awsModelID, ok := awsModelIDMap[requestModel]; ok {
+	if awsModelID, ok := AwsModelIDMap[requestModel]; ok {
 		return awsModelID, nil
 	}
 
@@ -54,7 +46,7 @@ func awsModelID(requestModel string) (string, error) {
 func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
 	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "awsModelID")), nil
+		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}
 
 	awsReq := &bedrockruntime.InvokeModelInput{
@@ -65,30 +57,30 @@ func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*
 
 	claudeReq_, ok := c.Get(ctxkey.ConvertedRequest)
 	if !ok {
-		return wrapErr(errors.New("request not found")), nil
+		return utils.WrapErr(errors.New("request not found")), nil
 	}
 	claudeReq := claudeReq_.(*anthropic.Request)
 	awsClaudeReq := &Request{
 		AnthropicVersion: "bedrock-2023-05-31",
 	}
 	if err = copier.Copy(awsClaudeReq, claudeReq); err != nil {
-		return wrapErr(errors.Wrap(err, "copy request")), nil
+		return utils.WrapErr(errors.Wrap(err, "copy request")), nil
 	}
 
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "marshal request")), nil
+		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
 	}
 
 	awsResp, err := awsCli.InvokeModel(c.Request.Context(), awsReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "InvokeModel")), nil
+		return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil
 	}
 
 	claudeResponse := new(anthropic.Response)
 	err = json.Unmarshal(awsResp.Body, claudeResponse)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "unmarshal response")), nil
+		return utils.WrapErr(errors.Wrap(err, "unmarshal response")), nil
 	}
 
 	openaiResp := anthropic.ResponseClaude2OpenAI(claudeResponse)
@@ -108,7 +100,7 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 	createdTime := helper.GetTimestamp()
 	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "awsModelID")), nil
+		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}
 
 	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
@@ -119,7 +111,7 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 
 	claudeReq_, ok := c.Get(ctxkey.ConvertedRequest)
 	if !ok {
-		return wrapErr(errors.New("request not found")), nil
+		return utils.WrapErr(errors.New("request not found")), nil
 	}
 	claudeReq := claudeReq_.(*anthropic.Request)
 
@@ -127,16 +119,16 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 		AnthropicVersion: "bedrock-2023-05-31",
 	}
 	if err = copier.Copy(awsClaudeReq, claudeReq); err != nil {
-		return wrapErr(errors.Wrap(err, "copy request")), nil
+		return utils.WrapErr(errors.Wrap(err, "copy request")), nil
 	}
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "marshal request")), nil
+		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
 	}
 
 	awsResp, err := awsCli.InvokeModelWithResponseStream(c.Request.Context(), awsReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
+		return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
 	}
 	stream := awsResp.GetStream()
 	defer stream.Close()
relay/adaptor/aws/llama3/adapter.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package aws

import (
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/songquanpeng/one-api/common/ctxkey"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

var _ utils.AwsAdapter = new(Adaptor)

type Adaptor struct {
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

	llamaReq := ConvertRequest(*request)
	c.Set(ctxkey.RequestModel, request.Model)
	c.Set(ctxkey.ConvertedRequest, llamaReq)
	return llamaReq, nil
}

func (a *Adaptor) DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		err, usage = StreamHandler(c, awsCli)
	} else {
		err, usage = Handler(c, awsCli, meta.ActualModelName)
	}
	return
}
relay/adaptor/aws/llama3/main.go (new file, 231 lines)
@@ -0,0 +1,231 @@
// Package aws provides the AWS adaptor for the relay service.
package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"text/template"

	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/common/random"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types"
	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

// Only support llama-3-8b and llama-3-70b instruction models
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
var AwsModelIDMap = map[string]string{
	"llama3-8b-8192":  "meta.llama3-8b-instruct-v1:0",
	"llama3-70b-8192": "meta.llama3-70b-instruct-v1:0",
}

func awsModelID(requestModel string) (string, error) {
	if awsModelID, ok := AwsModelIDMap[requestModel]; ok {
		return awsModelID, nil
	}

	return "", errors.Errorf("model %s not found", requestModel)
}

// promptTemplate with range
const promptTemplate = `<|begin_of_text|>{{range .Messages}}<|start_header_id|>{{.Role}}<|end_header_id|>{{.StringContent}}<|eot_id|>{{end}}<|start_header_id|>assistant<|end_header_id|>
`

var promptTpl = template.Must(template.New("llama3-chat").Parse(promptTemplate))

func RenderPrompt(messages []relaymodel.Message) string {
	var buf bytes.Buffer
	err := promptTpl.Execute(&buf, struct{ Messages []relaymodel.Message }{messages})
	if err != nil {
		logger.SysError("error rendering prompt messages: " + err.Error())
	}
	return buf.String()
}

func ConvertRequest(textRequest relaymodel.GeneralOpenAIRequest) *Request {
	llamaRequest := Request{
		MaxGenLen:   textRequest.MaxTokens,
		Temperature: textRequest.Temperature,
		TopP:        textRequest.TopP,
	}
	if llamaRequest.MaxGenLen == 0 {
		llamaRequest.MaxGenLen = 2048
	}
	prompt := RenderPrompt(textRequest.Messages)
	llamaRequest.Prompt = prompt
	return &llamaRequest
}

func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}

	awsReq := &bedrockruntime.InvokeModelInput{
		ModelId:     aws.String(awsModelId),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

	llamaReq, ok := c.Get(ctxkey.ConvertedRequest)
	if !ok {
		return utils.WrapErr(errors.New("request not found")), nil
	}

	awsReq.Body, err = json.Marshal(llamaReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
	}

	awsResp, err := awsCli.InvokeModel(c.Request.Context(), awsReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil
	}

	var llamaResponse Response
	err = json.Unmarshal(awsResp.Body, &llamaResponse)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "unmarshal response")), nil
	}

	openaiResp := ResponseLlama2OpenAI(&llamaResponse)
	openaiResp.Model = modelName
	usage := relaymodel.Usage{
		PromptTokens:     llamaResponse.PromptTokenCount,
		CompletionTokens: llamaResponse.GenerationTokenCount,
		TotalTokens:      llamaResponse.PromptTokenCount + llamaResponse.GenerationTokenCount,
	}
	openaiResp.Usage = usage

	c.JSON(http.StatusOK, openaiResp)
	return nil, &usage
}

func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse {
	var responseText string
	if len(llamaResponse.Generation) > 0 {
		responseText = llamaResponse.Generation
	}
	choice := openai.TextResponseChoice{
		Index: 0,
		Message: relaymodel.Message{
			Role:    "assistant",
			Content: responseText,
			Name:    nil,
		},
		FinishReason: llamaResponse.StopReason,
	}
	fullTextResponse := openai.TextResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", random.GetUUID()),
		Object:  "chat.completion",
		Created: helper.GetTimestamp(),
		Choices: []openai.TextResponseChoice{choice},
	}
	return &fullTextResponse
}

func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	createdTime := helper.GetTimestamp()
	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}

	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
		ModelId:     aws.String(awsModelId),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

	llamaReq, ok := c.Get(ctxkey.ConvertedRequest)
	if !ok {
		return utils.WrapErr(errors.New("request not found")), nil
	}

	awsReq.Body, err = json.Marshal(llamaReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
	}

	awsResp, err := awsCli.InvokeModelWithResponseStream(c.Request.Context(), awsReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
	}
	stream := awsResp.GetStream()
	defer stream.Close()

	c.Writer.Header().Set("Content-Type", "text/event-stream")
	var usage relaymodel.Usage
	c.Stream(func(w io.Writer) bool {
		event, ok := <-stream.Events()
		if !ok {
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}

		switch v := event.(type) {
		case *types.ResponseStreamMemberChunk:
			var llamaResp StreamResponse
			err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(&llamaResp)
			if err != nil {
				logger.SysError("error unmarshalling stream response: " + err.Error())
				return false
			}

			if llamaResp.PromptTokenCount > 0 {
				usage.PromptTokens = llamaResp.PromptTokenCount
			}
			if llamaResp.StopReason == "stop" {
				usage.CompletionTokens = llamaResp.GenerationTokenCount
				usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
			}
			response := StreamResponseLlama2OpenAI(&llamaResp)
			response.Id = fmt.Sprintf("chatcmpl-%s", random.GetUUID())
			response.Model = c.GetString(ctxkey.OriginalModel)
			response.Created = createdTime
			jsonStr, err := json.Marshal(response)
			if err != nil {
				logger.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
			return true
		case *types.UnknownUnionMember:
			fmt.Println("unknown tag:", v.Tag)
			return false
		default:
			fmt.Println("union is nil or unknown type")
			return false
		}
	})

	return nil, &usage
}

func StreamResponseLlama2OpenAI(llamaResponse *StreamResponse) *openai.ChatCompletionsStreamResponse {
	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Content = llamaResponse.Generation
	choice.Delta.Role = "assistant"
	finishReason := llamaResponse.StopReason
	if finishReason != "null" {
		choice.FinishReason = &finishReason
	}
	var openaiResponse openai.ChatCompletionsStreamResponse
	openaiResponse.Object = "chat.completion.chunk"
	openaiResponse.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
	return &openaiResponse
}
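Two concrete shapes fall out of the code above. The sketch below shows what ConvertRequest produces for a minimal chat request (model name and message are illustrative; the printed line follows from the template, the 2048 default, and the omitempty tags), and the trailing comment sketches the SSE chunk format StreamHandler renders:

```go
package main

import (
	"encoding/json"
	"fmt"

	llama3 "github.com/songquanpeng/one-api/relay/adaptor/aws/llama3"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

func main() {
	req := llama3.ConvertRequest(relaymodel.GeneralOpenAIRequest{
		Model:    "llama3-8b-8192",
		Messages: []relaymodel.Message{{Role: "user", Content: "Hi"}},
	})
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
	// {"prompt":"<|begin_of_text|><|start_header_id|>user<|end_header_id|>Hi<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n","max_gen_len":2048}
}

// Each chunk rendered by StreamHandler is one SSE data line shaped like an
// OpenAI chat.completion.chunk (id/created/model values illustrative):
//
//   data: {"id":"chatcmpl-<uuid>","object":"chat.completion.chunk","created":1716000000,
//          "model":"llama3-8b-8192","choices":[{"delta":{"role":"assistant","content":"Hi"}}]}
//
// followed by "data: [DONE]" once the Bedrock stream closes.
```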
relay/adaptor/aws/llama3/main_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package aws_test

import (
	"testing"

	aws "github.com/songquanpeng/one-api/relay/adaptor/aws/llama3"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
	"github.com/stretchr/testify/assert"
)

func TestRenderPrompt(t *testing.T) {
	messages := []relaymodel.Message{
		{
			Role:    "user",
			Content: "What's your name?",
		},
	}
	prompt := aws.RenderPrompt(messages)
	expected := `<|begin_of_text|><|start_header_id|>user<|end_header_id|>What's your name?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
`
	assert.Equal(t, expected, prompt)

	messages = []relaymodel.Message{
		{
			Role:    "system",
			Content: "Your name is Kat. You are a detective.",
		},
		{
			Role:    "user",
			Content: "What's your name?",
		},
		{
			Role:    "assistant",
			Content: "Kat",
		},
		{
			Role:    "user",
			Content: "What's your job?",
		},
	}
	prompt = aws.RenderPrompt(messages)
	expected = `<|begin_of_text|><|start_header_id|>system<|end_header_id|>Your name is Kat. You are a detective.<|eot_id|><|start_header_id|>user<|end_header_id|>What's your name?<|eot_id|><|start_header_id|>assistant<|end_header_id|>Kat<|eot_id|><|start_header_id|>user<|end_header_id|>What's your job?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
`
	assert.Equal(t, expected, prompt)
}
relay/adaptor/aws/llama3/model.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package aws

// Request is the request to AWS Llama3
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Request struct {
	Prompt      string  `json:"prompt"`
	MaxGenLen   int     `json:"max_gen_len,omitempty"`
	Temperature float64 `json:"temperature,omitempty"`
	TopP        float64 `json:"top_p,omitempty"`
}

// Response is the response from AWS Llama3
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Response struct {
	Generation           string `json:"generation"`
	PromptTokenCount     int    `json:"prompt_token_count"`
	GenerationTokenCount int    `json:"generation_token_count"`
	StopReason           string `json:"stop_reason"`
}

// {'generation': 'Hi', 'prompt_token_count': 15, 'generation_token_count': 1, 'stop_reason': None}
type StreamResponse struct {
	Generation           string `json:"generation"`
	PromptTokenCount     int    `json:"prompt_token_count"`
	GenerationTokenCount int    `json:"generation_token_count"`
	StopReason           string `json:"stop_reason"`
}
relay/adaptor/aws/registry.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package aws

import (
	claude "github.com/songquanpeng/one-api/relay/adaptor/aws/claude"
	llama3 "github.com/songquanpeng/one-api/relay/adaptor/aws/llama3"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
)

type AwsModelType int

const (
	AwsClaude AwsModelType = iota + 1
	AwsLlama3
)

var (
	adaptors = map[string]AwsModelType{}
)

func init() {
	for model := range claude.AwsModelIDMap {
		adaptors[model] = AwsClaude
	}
	for model := range llama3.AwsModelIDMap {
		adaptors[model] = AwsLlama3
	}
}

func GetAdaptor(model string) utils.AwsAdapter {
	adaptorType := adaptors[model]
	switch adaptorType {
	case AwsClaude:
		return &claude.Adaptor{}
	case AwsLlama3:
		return &llama3.Adaptor{}
	default:
		return nil
	}
}
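Dispatch is a plain map lookup keyed by the public model names that the two init() loops collect. A small usage sketch (the program wrapper is hypothetical):

```go
package main

import (
	"fmt"

	aws "github.com/songquanpeng/one-api/relay/adaptor/aws"
)

func main() {
	// Registered model names resolve to their sub-adaptors; anything else
	// falls through to nil, which the top-level ConvertRequest reports as
	// "adaptor not found".
	fmt.Println(aws.GetAdaptor("claude-2.1") != nil)     // true
	fmt.Println(aws.GetAdaptor("llama3-8b-8192") != nil) // true
	fmt.Println(aws.GetAdaptor("gpt-4") == nil)          // true
}
```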
relay/adaptor/aws/utils/adaptor.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package utils

import (
	"errors"
	"io"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

type AwsAdapter interface {
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode)
}

type Adaptor struct {
	Meta      *meta.Meta
	AwsClient *bedrockruntime.Client
}

func (a *Adaptor) Init(meta *meta.Meta) {
	a.Meta = meta
	a.AwsClient = bedrockruntime.New(bedrockruntime.Options{
		Region:      meta.Config.Region,
		Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(meta.Config.AK, meta.Config.SK, "")),
	})
}

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
	return "", nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
	return nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	return request, nil
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
	return nil, nil
}
relay/adaptor/aws/utils/utils.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package utils

import (
	"net/http"

	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

func WrapErr(err error) *relaymodel.ErrorWithStatusCode {
	return &relaymodel.ErrorWithStatusCode{
		StatusCode: http.StatusInternalServerError,
		Error: relaymodel.Error{
			Message: err.Error(),
		},
	}
}
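WrapErr is the shared replacement for the per-package wrapErr helper deleted above, so every sub-adaptor failure surfaces as HTTP 500 with the wrapped message. A quick sketch (the error values are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
)

func main() {
	e := utils.WrapErr(errors.Wrap(errors.New("throttled"), "InvokeModel"))
	fmt.Println(e.StatusCode == http.StatusInternalServerError) // true
	fmt.Println(e.Error.Message)                                // InvokeModel: throttled
}
```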
relay/adaptor/cloudflare/adaptor.go
@@ -10,6 +10,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/relaymode"
 )
 
 type Adaptor struct {
@@ -28,7 +29,14 @@ func (a *Adaptor) Init(meta *meta.Meta) {
 }
 
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	return fmt.Sprintf("%s/client/v4/accounts/%s/ai/run/%s", meta.BaseURL, meta.Config.UserID, meta.ActualModelName), nil
+	switch meta.Mode {
+	case relaymode.ChatCompletions:
+		return fmt.Sprintf("%s/client/v4/accounts/%s/ai/v1/chat/completions", meta.BaseURL, meta.Config.UserID), nil
+	case relaymode.Embeddings:
+		return fmt.Sprintf("%s/client/v4/accounts/%s/ai/v1/embeddings", meta.BaseURL, meta.Config.UserID), nil
+	default:
+		return fmt.Sprintf("%s/client/v4/accounts/%s/ai/run/%s", meta.BaseURL, meta.Config.UserID, meta.ActualModelName), nil
+	}
 }
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
@@ -41,7 +49,14 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
-	return ConvertRequest(*request), nil
+	switch relayMode {
+	case relaymode.Completions:
+		return ConvertCompletionsRequest(*request), nil
+	case relaymode.ChatCompletions, relaymode.Embeddings:
+		return request, nil
+	default:
+		return nil, errors.New("not implemented")
+	}
 }
 
 func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
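With this switch, each relay mode maps to a distinct Cloudflare endpoint. Worked examples, assuming a hypothetical BaseURL and account id:

```go
// Assuming BaseURL = "https://api.cloudflare.com" and Config.UserID = "acct123":
//
//   relaymode.ChatCompletions ->
//     https://api.cloudflare.com/client/v4/accounts/acct123/ai/v1/chat/completions
//   relaymode.Embeddings ->
//     https://api.cloudflare.com/client/v4/accounts/acct123/ai/v1/embeddings
//   any other mode (legacy task-style invocation, with
//   ActualModelName = "@cf/meta/llama-3-8b-instruct") ->
//     https://api.cloudflare.com/client/v4/accounts/acct123/ai/run/@cf/meta/llama-3-8b-instruct
```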
relay/adaptor/cloudflare/main.go
@@ -3,11 +3,13 @@ package cloudflare
 import (
 	"bufio"
 	"encoding/json"
-	"github.com/songquanpeng/one-api/common/render"
 	"io"
 	"net/http"
 	"strings"
 
+	"github.com/songquanpeng/one-api/common/ctxkey"
+	"github.com/songquanpeng/one-api/common/render"
+
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -16,57 +18,23 @@ import (
 	"github.com/songquanpeng/one-api/relay/model"
 )
 
-func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
-	var promptBuilder strings.Builder
-	for _, message := range textRequest.Messages {
-		promptBuilder.WriteString(message.StringContent())
-		promptBuilder.WriteString("\n") // add a newline to separate the messages
-	}
-
+func ConvertCompletionsRequest(textRequest model.GeneralOpenAIRequest) *Request {
+	p, _ := textRequest.Prompt.(string)
 	return &Request{
+		Prompt:      p,
 		MaxTokens:   textRequest.MaxTokens,
-		Prompt:      promptBuilder.String(),
 		Stream:      textRequest.Stream,
 		Temperature: textRequest.Temperature,
 	}
 }
 
-func ResponseCloudflare2OpenAI(cloudflareResponse *Response) *openai.TextResponse {
-	choice := openai.TextResponseChoice{
-		Index: 0,
-		Message: model.Message{
-			Role:    "assistant",
-			Content: cloudflareResponse.Result.Response,
-		},
-		FinishReason: "stop",
-	}
-	fullTextResponse := openai.TextResponse{
-		Object:  "chat.completion",
-		Created: helper.GetTimestamp(),
-		Choices: []openai.TextResponseChoice{choice},
-	}
-	return &fullTextResponse
-}
-
-func StreamResponseCloudflare2OpenAI(cloudflareResponse *StreamResponse) *openai.ChatCompletionsStreamResponse {
-	var choice openai.ChatCompletionsStreamResponseChoice
-	choice.Delta.Content = cloudflareResponse.Response
-	choice.Delta.Role = "assistant"
-	openaiResponse := openai.ChatCompletionsStreamResponse{
-		Object:  "chat.completion.chunk",
-		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
-		Created: helper.GetTimestamp(),
-	}
-	return &openaiResponse
-}
-
 func StreamHandler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(bufio.ScanLines)
 
 	common.SetEventStreamHeaders(c)
 	id := helper.GetResponseID(c)
-	responseModel := c.GetString("original_model")
+	responseModel := c.GetString(ctxkey.OriginalModel)
 	var responseText string
 
 	for scanner.Scan() {
@@ -77,22 +45,22 @@ func StreamHandler(c *gin.Context, resp *http.Response, promptTokens int, modelN
 		data = strings.TrimPrefix(data, "data: ")
 		data = strings.TrimSuffix(data, "\r")
 
-		var cloudflareResponse StreamResponse
-		err := json.Unmarshal([]byte(data), &cloudflareResponse)
+		if data == "[DONE]" {
+			break
+		}
+
+		var response openai.ChatCompletionsStreamResponse
+		err := json.Unmarshal([]byte(data), &response)
 		if err != nil {
 			logger.SysError("error unmarshalling stream response: " + err.Error())
 			continue
 		}
 
-		response := StreamResponseCloudflare2OpenAI(&cloudflareResponse)
-		if response == nil {
-			continue
+		for _, v := range response.Choices {
+			v.Delta.Role = "assistant"
+			responseText += v.Delta.StringContent()
 		}
 
-		responseText += cloudflareResponse.Response
 		response.Id = id
-		response.Model = responseModel
-
+		response.Model = modelName
 		err = render.ObjectData(c, response)
 		if err != nil {
 			logger.SysError(err.Error())
@@ -123,22 +91,25 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
-	var cloudflareResponse Response
-	err = json.Unmarshal(responseBody, &cloudflareResponse)
+	var response openai.TextResponse
+	err = json.Unmarshal(responseBody, &response)
 	if err != nil {
 		return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
-	fullTextResponse := ResponseCloudflare2OpenAI(&cloudflareResponse)
-	fullTextResponse.Model = modelName
-	usage := openai.ResponseText2Usage(cloudflareResponse.Result.Response, modelName, promptTokens)
-	fullTextResponse.Usage = *usage
-	fullTextResponse.Id = helper.GetResponseID(c)
-	jsonResponse, err := json.Marshal(fullTextResponse)
+	response.Model = modelName
+	var responseText string
+	for _, v := range response.Choices {
+		responseText += v.Message.Content.(string)
+	}
+	usage := openai.ResponseText2Usage(responseText, modelName, promptTokens)
+	response.Usage = *usage
+	response.Id = helper.GetResponseID(c)
+	jsonResponse, err := json.Marshal(response)
 	if err != nil {
 		return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 	c.Writer.Header().Set("Content-Type", "application/json")
 	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = c.Writer.Write(jsonResponse)
+	_, _ = c.Writer.Write(jsonResponse)
 	return nil, usage
 }
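The rewritten handlers parse Cloudflare's OpenAI-compatible payloads directly instead of translating the legacy result shape. A stream line accepted by the new StreamHandler looks roughly like this (values illustrative):

```go
//   data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"}}]}
//
// and the literal "data: [DONE]" sentinel now ends the read loop instead of
// being fed to the JSON decoder.
```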
relay/adaptor/cloudflare/model.go
@@ -1,25 +1,13 @@
 package cloudflare
 
+import "github.com/songquanpeng/one-api/relay/model"
+
 type Request struct {
+	Messages    []model.Message `json:"messages,omitempty"`
 	Lora        string          `json:"lora,omitempty"`
 	MaxTokens   int             `json:"max_tokens,omitempty"`
 	Prompt      string          `json:"prompt,omitempty"`
 	Raw         bool            `json:"raw,omitempty"`
 	Stream      bool            `json:"stream,omitempty"`
 	Temperature float64         `json:"temperature,omitempty"`
 }
-
-type Result struct {
-	Response string `json:"response"`
-}
-
-type Response struct {
-	Result   Result   `json:"result"`
-	Success  bool     `json:"success"`
-	Errors   []string `json:"errors"`
-	Messages []string `json:"messages"`
-}
-
-type StreamResponse struct {
-	Response string `json:"response"`
-}
relay/adaptor/novita/constants.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package novita

// https://novita.ai/llm-api

var ModelList = []string{
	"meta-llama/llama-3-8b-instruct",
	"meta-llama/llama-3-70b-instruct",
	"nousresearch/hermes-2-pro-llama-3-8b",
	"nousresearch/nous-hermes-llama2-13b",
	"mistralai/mistral-7b-instruct",
	"cognitivecomputations/dolphin-mixtral-8x22b",
	"sao10k/l3-70b-euryale-v2.1",
	"sophosympatheia/midnight-rose-70b",
	"gryphe/mythomax-l2-13b",
	"Nous-Hermes-2-Mixtral-8x7B-DPO",
	"lzlv_70b",
	"teknium/openhermes-2.5-mistral-7b",
	"microsoft/wizardlm-2-8x22b",
}
relay/adaptor/novita/main.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package novita

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
	if meta.Mode == relaymode.ChatCompletions {
		return fmt.Sprintf("%s/chat/completions", meta.BaseURL), nil
	}
	return "", fmt.Errorf("unsupported relay mode %d for novita", meta.Mode)
}
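A usage sketch for the new helper; the BaseURL value is hypothetical, only the joining logic comes from this file:

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/adaptor/novita"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/relaymode"
)

func main() {
	u, err := novita.GetRequestURL(&meta.Meta{
		Mode:    relaymode.ChatCompletions,
		BaseURL: "https://api.novita.ai/v3/openai",
	})
	fmt.Println(u, err) // https://api.novita.ai/v3/openai/chat/completions <nil>

	_, err = novita.GetRequestURL(&meta.Meta{Mode: relaymode.Embeddings})
	fmt.Println(err) // unsupported relay mode ... for novita
}
```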
relay/adaptor/openai/adaptor.go
@@ -2,18 +2,20 @@ package openai
 import (
 	"fmt"
+	"io"
+	"net/http"
+	"strings"
 
 	"github.com/Laisky/errors/v2"
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/doubao"
 	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
+	"github.com/songquanpeng/one-api/relay/adaptor/novita"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/relaymode"
-	"io"
-	"net/http"
-	"strings"
 )
 
 type Adaptor struct {
@@ -48,6 +50,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 		return minimax.GetRequestURL(meta)
 	case channeltype.Doubao:
 		return doubao.GetRequestURL(meta)
+	case channeltype.Novita:
+		return novita.GetRequestURL(meta)
 	default:
 		return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
 	}
relay/adaptor/openai/compatible.go
@@ -10,6 +10,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/minimax"
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
+	"github.com/songquanpeng/one-api/relay/adaptor/novita"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
 	"github.com/songquanpeng/one-api/relay/channeltype"
@@ -28,6 +29,7 @@ var CompatibleChannels = []int{
 	channeltype.StepFun,
 	channeltype.DeepSeek,
 	channeltype.TogetherAI,
+	channeltype.Novita,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -56,6 +58,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "together.ai", togetherai.ModelList
 	case channeltype.Doubao:
 		return "doubao", doubao.ModelList
+	case channeltype.Novita:
+		return "novita", novita.ModelList
 	default:
 		return "openai", ModelList
 	}