Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-09 10:13:42 +08:00)

Commit: Merge remote-tracking branch 'origin/upstream/main' into patch/images-edits

@@ -4,6 +4,12 @@ import (
 	"bufio"
 	"encoding/json"
+	"fmt"
+	"github.com/songquanpeng/one-api/common/render"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
@@ -12,10 +18,6 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
 )
 
 // https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答
@@ -89,6 +91,7 @@ func streamResponseAIProxyLibrary2OpenAI(response *LibraryStreamResponse) *opena
 
 func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var usage model.Usage
+	var documents []LibraryDocument
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
 		if atEOF && len(data) == 0 {
@@ -102,60 +105,48 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		}
 		return 0, nil, nil
 	})
-	dataChan := make(chan string)
-	stopChan := make(chan bool)
-	go func() {
-		for scanner.Scan() {
-			data := scanner.Text()
-			if len(data) < 5 { // ignore blank line or wrong format
-				continue
-			}
-			if data[:5] != "data:" {
-				continue
-			}
-			data = data[5:]
-			dataChan <- data
-		}
-		stopChan <- true
-	}()
 
 	common.SetEventStreamHeaders(c)
-	var documents []LibraryDocument
-	c.Stream(func(w io.Writer) bool {
-		select {
-		case data := <-dataChan:
-			var AIProxyLibraryResponse LibraryStreamResponse
-			err := json.Unmarshal([]byte(data), &AIProxyLibraryResponse)
-			if err != nil {
-				logger.SysError("error unmarshalling stream response: " + err.Error())
-				return true
-			}
-			if len(AIProxyLibraryResponse.Documents) != 0 {
-				documents = AIProxyLibraryResponse.Documents
-			}
-			response := streamResponseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse)
-			jsonResponse, err := json.Marshal(response)
-			if err != nil {
-				logger.SysError("error marshalling stream response: " + err.Error())
-				return true
-			}
-			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
-			return true
-		case <-stopChan:
-			response := documentsAIProxyLibrary(documents)
-			jsonResponse, err := json.Marshal(response)
-			if err != nil {
-				logger.SysError("error marshalling stream response: " + err.Error())
-				return true
-			}
-			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
-			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
-			return false
-		}
-	})
-	err := resp.Body.Close()
+
+	for scanner.Scan() {
+		data := scanner.Text()
+		if len(data) < 5 || data[:5] != "data:" {
+			continue
+		}
+		data = data[5:]
+
+		var AIProxyLibraryResponse LibraryStreamResponse
+		err := json.Unmarshal([]byte(data), &AIProxyLibraryResponse)
+		if err != nil {
+			logger.SysError("error unmarshalling stream response: " + err.Error())
+			continue
+		}
+		if len(AIProxyLibraryResponse.Documents) != 0 {
+			documents = AIProxyLibraryResponse.Documents
+		}
+		response := streamResponseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse)
+		err = render.ObjectData(c, response)
+		if err != nil {
+			logger.SysError(err.Error())
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		logger.SysError("error reading stream: " + err.Error())
+	}
+
+	response := documentsAIProxyLibrary(documents)
+	err := render.ObjectData(c, response)
+	if err != nil {
+		logger.SysError(err.Error())
+	}
+	render.Done(c)
+
+	err = resp.Body.Close()
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
+
 	return nil, &usage
 }
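
Note: the stream-handler rewrites in this commit (aiproxy above; ali, baidu, and anthropic below) all follow one pattern: the dataChan/stopChan goroutine plus the c.Stream select loop is collapsed into a single synchronous scanner loop over the new common/render helpers, and a malformed chunk now skips one event (continue) instead of leaving a goroutine behind. A minimal sketch of the shared shape, assuming only what this diff shows, namely that render.ObjectData(c, v) JSON-encodes v and writes it as an SSE "data:" event and render.Done(c) writes the closing "data: [DONE]" (UpstreamChunk and convert are illustrative stand-ins for each channel's concrete types):

    // streamLoop is a sketch of the refactored pattern, not code from the commit.
    func streamLoop(c *gin.Context, resp *http.Response) {
        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            line := scanner.Text()
            if len(line) < 5 || line[:5] != "data:" { // drop blanks and non-SSE lines
                continue
            }
            var chunk UpstreamChunk // e.g. LibraryStreamResponse
            if err := json.Unmarshal([]byte(line[5:]), &chunk); err != nil {
                logger.SysError("error unmarshalling stream response: " + err.Error())
                continue // skip just this chunk
            }
            if err := render.ObjectData(c, convert(&chunk)); err != nil {
                logger.SysError(err.Error())
            }
        }
        if err := scanner.Err(); err != nil {
            logger.SysError("error reading stream: " + err.Error())
        }
        render.Done(c) // writes "data: [DONE]"
    }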

@@ -3,15 +3,17 @@ package ali
 	"bufio"
 	"encoding/json"
+	"github.com/songquanpeng/one-api/common/render"
+	"io"
+	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
-	"strings"
 )
 
 // https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r
@@ -181,56 +183,43 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		}
 		return 0, nil, nil
 	})
-	dataChan := make(chan string)
-	stopChan := make(chan bool)
-	go func() {
-		for scanner.Scan() {
-			data := scanner.Text()
-			if len(data) < 5 { // ignore blank line or wrong format
-				continue
-			}
-			if data[:5] != "data:" {
-				continue
-			}
-			data = data[5:]
-			dataChan <- data
-		}
-		stopChan <- true
-	}()
 
 	common.SetEventStreamHeaders(c)
-	//lastResponseText := ""
-	c.Stream(func(w io.Writer) bool {
-		select {
-		case data := <-dataChan:
-			var aliResponse ChatResponse
-			err := json.Unmarshal([]byte(data), &aliResponse)
-			if err != nil {
-				logger.SysError("error unmarshalling stream response: " + err.Error())
-				return true
-			}
-			if aliResponse.Usage.OutputTokens != 0 {
-				usage.PromptTokens = aliResponse.Usage.InputTokens
-				usage.CompletionTokens = aliResponse.Usage.OutputTokens
-				usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens
-			}
-			response := streamResponseAli2OpenAI(&aliResponse)
-			if response == nil {
-				return true
-			}
-			//response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
-			//lastResponseText = aliResponse.Output.Text
-			jsonResponse, err := json.Marshal(response)
-			if err != nil {
-				logger.SysError("error marshalling stream response: " + err.Error())
-				return true
-			}
-			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
-			return true
-		case <-stopChan:
-			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
-			return false
-		}
-	})
+
+	for scanner.Scan() {
+		data := scanner.Text()
+		if len(data) < 5 || data[:5] != "data:" {
+			continue
+		}
+		data = data[5:]
+
+		var aliResponse ChatResponse
+		err := json.Unmarshal([]byte(data), &aliResponse)
+		if err != nil {
+			logger.SysError("error unmarshalling stream response: " + err.Error())
+			continue
+		}
+		if aliResponse.Usage.OutputTokens != 0 {
+			usage.PromptTokens = aliResponse.Usage.InputTokens
+			usage.CompletionTokens = aliResponse.Usage.OutputTokens
+			usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens
+		}
+		response := streamResponseAli2OpenAI(&aliResponse)
+		if response == nil {
+			continue
+		}
+		err = render.ObjectData(c, response)
+		if err != nil {
+			logger.SysError(err.Error())
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		logger.SysError("error reading stream: " + err.Error())
+	}
+
+	render.Done(c)
+
 	err := resp.Body.Close()
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil

@@ -5,4 +5,5 @@ var ModelList = []string{
 	"claude-3-haiku-20240307",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
+	"claude-3-5-sonnet-20240620",
 }
@@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common/render"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -28,12 +29,30 @@ func stopReasonClaude2OpenAI(reason *string) string {
|
||||
return "stop"
|
||||
case "max_tokens":
|
||||
return "length"
|
||||
case "tool_use":
|
||||
return "tool_calls"
|
||||
default:
|
||||
return *reason
|
||||
}
|
||||
}
|
||||
|
||||
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
|
||||
claudeTools := make([]Tool, 0, len(textRequest.Tools))
|
||||
|
||||
for _, tool := range textRequest.Tools {
|
||||
if params, ok := tool.Function.Parameters.(map[string]any); ok {
|
||||
claudeTools = append(claudeTools, Tool{
|
||||
Name: tool.Function.Name,
|
||||
Description: tool.Function.Description,
|
||||
InputSchema: InputSchema{
|
||||
Type: params["type"].(string),
|
||||
Properties: params["properties"],
|
||||
Required: params["required"],
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
claudeRequest := Request{
|
||||
Model: textRequest.Model,
|
||||
MaxTokens: textRequest.MaxTokens,
|
||||
@@ -41,6 +60,24 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
|
||||
TopP: textRequest.TopP,
|
||||
TopK: textRequest.TopK,
|
||||
Stream: textRequest.Stream,
|
||||
Tools: claudeTools,
|
||||
}
|
||||
if len(claudeTools) > 0 {
|
||||
claudeToolChoice := struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
}{Type: "auto"} // default value https://docs.anthropic.com/en/docs/build-with-claude/tool-use#controlling-claudes-output
|
||||
if choice, ok := textRequest.ToolChoice.(map[string]any); ok {
|
||||
if function, ok := choice["function"].(map[string]any); ok {
|
||||
claudeToolChoice.Type = "tool"
|
||||
claudeToolChoice.Name = function["name"].(string)
|
||||
}
|
||||
} else if toolChoiceType, ok := textRequest.ToolChoice.(string); ok {
|
||||
if toolChoiceType == "any" {
|
||||
claudeToolChoice.Type = toolChoiceType
|
||||
}
|
||||
}
|
||||
claudeRequest.ToolChoice = claudeToolChoice
|
||||
}
|
||||
if claudeRequest.MaxTokens == 0 {
|
||||
claudeRequest.MaxTokens = 4096
|
||||
@@ -63,7 +100,24 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
|
||||
if message.IsStringContent() {
|
||||
content.Type = "text"
|
||||
content.Text = message.StringContent()
|
||||
if message.Role == "tool" {
|
||||
claudeMessage.Role = "user"
|
||||
content.Type = "tool_result"
|
||||
content.Content = content.Text
|
||||
content.Text = ""
|
||||
content.ToolUseId = message.ToolCallId
|
||||
}
|
||||
claudeMessage.Content = append(claudeMessage.Content, content)
|
||||
for i := range message.ToolCalls {
|
||||
inputParam := make(map[string]any)
|
||||
_ = json.Unmarshal([]byte(message.ToolCalls[i].Function.Arguments.(string)), &inputParam)
|
||||
claudeMessage.Content = append(claudeMessage.Content, Content{
|
||||
Type: "tool_use",
|
||||
Id: message.ToolCalls[i].Id,
|
||||
Name: message.ToolCalls[i].Function.Name,
|
||||
Input: inputParam,
|
||||
})
|
||||
}
|
||||
claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
|
||||
continue
|
||||
}
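
Note: the tool_choice mapping added above is easier to read restated on its own. A compact sketch of the same conversion (the struct and function names here are illustrative, not part of the commit): OpenAI's {"type":"function","function":{"name":"get_weather"}} becomes Anthropic's {"type":"tool","name":"get_weather"}, the string "any" passes through, and anything else falls back to Anthropic's documented default "auto".

    type claudeToolChoice struct {
        Type string `json:"type"`
        Name string `json:"name,omitempty"`
    }

    // mapToolChoice mirrors the logic inside ConvertRequest above.
    func mapToolChoice(openaiToolChoice any) claudeToolChoice {
        tc := claudeToolChoice{Type: "auto"} // Anthropic's default
        if choice, ok := openaiToolChoice.(map[string]any); ok {
            if fn, ok := choice["function"].(map[string]any); ok {
                tc.Type = "tool"
                tc.Name, _ = fn["name"].(string)
            }
        } else if s, ok := openaiToolChoice.(string); ok && s == "any" {
            tc.Type = "any"
        }
        return tc
    }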

@@ -96,16 +150,35 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
 	var response *Response
 	var responseText string
 	var stopReason string
+	tools := make([]model.Tool, 0)
+
 	switch claudeResponse.Type {
 	case "message_start":
 		return nil, claudeResponse.Message
 	case "content_block_start":
 		if claudeResponse.ContentBlock != nil {
 			responseText = claudeResponse.ContentBlock.Text
+			if claudeResponse.ContentBlock.Type == "tool_use" {
+				tools = append(tools, model.Tool{
+					Id:   claudeResponse.ContentBlock.Id,
+					Type: "function",
+					Function: model.Function{
+						Name:      claudeResponse.ContentBlock.Name,
+						Arguments: "",
+					},
+				})
+			}
 		}
 	case "content_block_delta":
 		if claudeResponse.Delta != nil {
 			responseText = claudeResponse.Delta.Text
+			if claudeResponse.Delta.Type == "input_json_delta" {
+				tools = append(tools, model.Tool{
+					Function: model.Function{
+						Arguments: claudeResponse.Delta.PartialJson,
+					},
+				})
+			}
 		}
 	case "message_delta":
 		if claudeResponse.Usage != nil {
@@ -119,6 +192,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
 	}
 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = responseText
+	if len(tools) > 0 {
+		choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
+		choice.Delta.ToolCalls = tools
+	}
 	choice.Delta.Role = "assistant"
 	finishReason := stopReasonClaude2OpenAI(&stopReason)
 	if finishReason != "null" {
@@ -135,12 +212,27 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
 	if len(claudeResponse.Content) > 0 {
 		responseText = claudeResponse.Content[0].Text
 	}
+	tools := make([]model.Tool, 0)
+	for _, v := range claudeResponse.Content {
+		if v.Type == "tool_use" {
+			args, _ := json.Marshal(v.Input)
+			tools = append(tools, model.Tool{
+				Id:   v.Id,
+				Type: "function", // compatible with other OpenAI derivative applications
+				Function: model.Function{
+					Name:      v.Name,
+					Arguments: string(args),
+				},
+			})
+		}
+	}
 	choice := openai.TextResponseChoice{
 		Index: 0,
 		Message: model.Message{
-			Role:    "assistant",
-			Content: responseText,
-			Name:    nil,
+			Role:      "assistant",
+			Content:   responseText,
+			Name:      nil,
+			ToolCalls: tools,
 		},
 		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
 	}
@@ -169,64 +261,77 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 		}
 		return 0, nil, nil
 	})
-	dataChan := make(chan string)
-	stopChan := make(chan bool)
-	go func() {
-		for scanner.Scan() {
-			data := scanner.Text()
-			if len(data) < 6 {
-				continue
-			}
-			if !strings.HasPrefix(data, "data:") {
-				continue
-			}
-			data = strings.TrimPrefix(data, "data:")
-			dataChan <- data
-		}
-		stopChan <- true
-	}()
 
 	common.SetEventStreamHeaders(c)
 
 	var usage model.Usage
 	var modelName string
 	var id string
-	c.Stream(func(w io.Writer) bool {
-		select {
-		case data := <-dataChan:
-			// some implementations may add \r at the end of data
-			data = strings.TrimSpace(data)
-			var claudeResponse StreamResponse
-			err := json.Unmarshal([]byte(data), &claudeResponse)
-			if err != nil {
-				logger.SysError("error unmarshalling stream response: " + err.Error())
-				return true
-			}
-			response, meta := StreamResponseClaude2OpenAI(&claudeResponse)
-			if meta != nil {
-				usage.PromptTokens += meta.Usage.InputTokens
-				usage.CompletionTokens += meta.Usage.OutputTokens
-				modelName = meta.Model
-				id = fmt.Sprintf("chatcmpl-%s", meta.Id)
-				return true
-			}
-			if response == nil {
-				return true
-			}
-			response.Id = id
-			response.Model = modelName
-			response.Created = createdTime
-			jsonStr, err := json.Marshal(response)
-			if err != nil {
-				logger.SysError("error marshalling stream response: " + err.Error())
-				return true
-			}
-			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
-			return true
-		case <-stopChan:
-			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
-			return false
-		}
-	})
-	_ = resp.Body.Close()
+	var lastToolCallChoice openai.ChatCompletionsStreamResponseChoice
+
+	for scanner.Scan() {
+		data := scanner.Text()
+		if len(data) < 6 || !strings.HasPrefix(data, "data:") {
+			continue
+		}
+		data = strings.TrimPrefix(data, "data:")
+		// some implementations may add \r at the end of data
+		data = strings.TrimSpace(data)
+
+		var claudeResponse StreamResponse
+		err := json.Unmarshal([]byte(data), &claudeResponse)
+		if err != nil {
+			logger.SysError("error unmarshalling stream response: " + err.Error())
+			continue
+		}
+
+		response, meta := StreamResponseClaude2OpenAI(&claudeResponse)
+		if meta != nil {
+			usage.PromptTokens += meta.Usage.InputTokens
+			usage.CompletionTokens += meta.Usage.OutputTokens
+			if len(meta.Id) > 0 { // only message_start has an id, otherwise it's a finish_reason event.
+				modelName = meta.Model
+				id = fmt.Sprintf("chatcmpl-%s", meta.Id)
+				continue
+			} else { // finish_reason case
+				if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
+					lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function
+					if len(lastArgs.Arguments.(string)) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments.
+						lastArgs.Arguments = "{}"
+						response.Choices[len(response.Choices)-1].Delta.Content = nil
+						response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
+					}
+				}
+			}
+		}
+		if response == nil {
+			continue
+		}
+
+		response.Id = id
+		response.Model = modelName
+		response.Created = createdTime
+
+		for _, choice := range response.Choices {
+			if len(choice.Delta.ToolCalls) > 0 {
+				lastToolCallChoice = choice
+			}
+		}
+		err = render.ObjectData(c, response)
+		if err != nil {
+			logger.SysError(err.Error())
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		logger.SysError("error reading stream: " + err.Error())
+	}
+
+	render.Done(c)
+
+	err := resp.Body.Close()
+	if err != nil {
+		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
 	return nil, &usage
 }
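
Note: the finish_reason branch above exists because Claude streams tool-call arguments as input_json_delta fragments, so a tool invoked with no arguments leaves the accumulated Arguments string empty, while OpenAI-compatible clients expect the literal "{}". A sketch of just that patch-up, assuming lastToolCallChoice holds the most recent choice that carried tool calls, as in the loop above:

    if calls := lastToolCallChoice.Delta.ToolCalls; len(calls) > 0 {
        last := &calls[len(calls)-1].Function
        if s, _ := last.Arguments.(string); len(s) == 0 {
            last.Arguments = "{}" // mirror OpenAI's empty-object form
        }
    }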

@@ -16,6 +16,12 @@ type Content struct {
 	Type   string       `json:"type"`
 	Text   string       `json:"text,omitempty"`
 	Source *ImageSource `json:"source,omitempty"`
+	// tool_calls
+	Id        string `json:"id,omitempty"`
+	Name      string `json:"name,omitempty"`
+	Input     any    `json:"input,omitempty"`
+	Content   string `json:"content,omitempty"`
+	ToolUseId string `json:"tool_use_id,omitempty"`
 }
 
 type Message struct {
@@ -23,6 +29,18 @@ type Message struct {
 	Content []Content `json:"content"`
 }
 
+type Tool struct {
+	Name        string      `json:"name"`
+	Description string      `json:"description,omitempty"`
+	InputSchema InputSchema `json:"input_schema"`
+}
+
+type InputSchema struct {
+	Type       string `json:"type"`
+	Properties any    `json:"properties,omitempty"`
+	Required   any    `json:"required,omitempty"`
+}
+
 type Request struct {
 	Model    string    `json:"model"`
 	Messages []Message `json:"messages"`
@@ -33,6 +51,8 @@ type Request struct {
 	Temperature float64 `json:"temperature,omitempty"`
 	TopP        float64 `json:"top_p,omitempty"`
 	TopK        int     `json:"top_k,omitempty"`
+	Tools       []Tool  `json:"tools,omitempty"`
+	ToolChoice  any     `json:"tool_choice,omitempty"`
 	//Metadata    `json:"metadata,omitempty"`
 }
@@ -61,6 +81,7 @@ type Response struct {
 type Delta struct {
 	Type         string  `json:"type"`
 	Text         string  `json:"text"`
+	PartialJson  string  `json:"partial_json,omitempty"`
 	StopReason   *string `json:"stop_reason"`
 	StopSequence *string `json:"stop_sequence"`
 }
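
Note: with the new types, a tool definition marshals to the wire shape Anthropic documents for tools. A small illustration (the weather tool is made up for the example):

    t := Tool{
        Name:        "get_weather",
        Description: "Get the current weather",
        InputSchema: InputSchema{
            Type:       "object",
            Properties: map[string]any{"location": map[string]any{"type": "string"}},
            Required:   []string{"location"},
        },
    }
    b, _ := json.Marshal(t)
    // {"name":"get_weather","description":"Get the current weather",
    //  "input_schema":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"]}}
    _ = b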

@@ -1,17 +1,16 @@
 package aws
 
 import (
-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/credentials"
-	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
-	"github.com/songquanpeng/one-api/common/ctxkey"
-	"errors"
 	"io"
 	"net/http"
 
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
 	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
 	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
 )
@@ -19,18 +18,52 @@ import (
 var _ adaptor.Adaptor = new(Adaptor)
 
 type Adaptor struct {
-	meta      *meta.Meta
-	awsClient *bedrockruntime.Client
+	awsAdapter utils.AwsAdapter
+
+	Meta      *meta.Meta
+	AwsClient *bedrockruntime.Client
 }
 
 func (a *Adaptor) Init(meta *meta.Meta) {
-	a.meta = meta
-	a.awsClient = bedrockruntime.New(bedrockruntime.Options{
+	a.Meta = meta
+	a.AwsClient = bedrockruntime.New(bedrockruntime.Options{
 		Region:      meta.Config.Region,
 		Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(meta.Config.AK, meta.Config.SK, "")),
 	})
 }
 
+func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
+	if request == nil {
+		return nil, errors.New("request is nil")
+	}
+
+	adaptor := GetAdaptor(request.Model)
+	if adaptor == nil {
+		return nil, errors.New("adaptor not found")
+	}
+
+	a.awsAdapter = adaptor
+	return adaptor.ConvertRequest(c, relayMode, request)
+}
+
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if a.awsAdapter == nil {
+		return nil, utils.WrapErr(errors.New("awsAdapter is nil"))
+	}
+	return a.awsAdapter.DoResponse(c, a.AwsClient, meta)
+}
+
+func (a *Adaptor) GetModelList() (models []string) {
+	for model := range adaptors {
+		models = append(models, model)
+	}
+	return
+}
+
+func (a *Adaptor) GetChannelName() string {
+	return "aws"
+}
+
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	return "", nil
 }
@@ -39,17 +72,6 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
 	return nil
 }
 
-func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
-	if request == nil {
-		return nil, errors.New("request is nil")
-	}
-
-	claudeReq := anthropic.ConvertRequest(*request)
-	c.Set(ctxkey.RequestModel, request.Model)
-	c.Set(ctxkey.ConvertedRequest, claudeReq)
-	return claudeReq, nil
-}
-
 func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
 	if request == nil {
 		return nil, errors.New("request is nil")
@@ -60,23 +82,3 @@ func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error)
 func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
 	return nil, nil
 }
-
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
-	if meta.IsStream {
-		err, usage = StreamHandler(c, a.awsClient)
-	} else {
-		err, usage = Handler(c, a.awsClient, meta.ActualModelName)
-	}
-	return
-}
-
-func (a *Adaptor) GetModelList() (models []string) {
-	for n := range awsModelIDMap {
-		models = append(models, n)
-	}
-	return
-}
-
-func (a *Adaptor) GetChannelName() string {
-	return "aws"
-}

relay/adaptor/aws/claude/adapter.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package aws

import (
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

var _ utils.AwsAdapter = new(Adaptor)

type Adaptor struct {
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

	claudeReq := anthropic.ConvertRequest(*request)
	c.Set(ctxkey.RequestModel, request.Model)
	c.Set(ctxkey.ConvertedRequest, claudeReq)
	return claudeReq, nil
}

func (a *Adaptor) DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		err, usage = StreamHandler(c, awsCli)
	} else {
		err, usage = Handler(c, awsCli, meta.ActualModelName)
	}
	return
}

@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"github.com/songquanpeng/one-api/common/ctxkey"
 	"io"
 	"net/http"
 
@@ -16,33 +15,28 @@ import (
 	"github.com/jinzhu/copier"
 	"github.com/pkg/errors"
 	"github.com/songquanpeng/one-api/common"
+	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay/adaptor/anthropic"
+	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	relaymodel "github.com/songquanpeng/one-api/relay/model"
 )
 
-func wrapErr(err error) *relaymodel.ErrorWithStatusCode {
-	return &relaymodel.ErrorWithStatusCode{
-		StatusCode: http.StatusInternalServerError,
-		Error: relaymodel.Error{
-			Message: fmt.Sprintf("%s", err.Error()),
-		},
-	}
-}
-
 // https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
-var awsModelIDMap = map[string]string{
-	"claude-instant-1.2":       "anthropic.claude-instant-v1",
-	"claude-2.0":               "anthropic.claude-v2",
-	"claude-2.1":               "anthropic.claude-v2:1",
-	"claude-3-sonnet-20240229": "anthropic.claude-3-sonnet-20240229-v1:0",
-	"claude-3-opus-20240229":   "anthropic.claude-3-opus-20240229-v1:0",
-	"claude-3-haiku-20240307":  "anthropic.claude-3-haiku-20240307-v1:0",
+var AwsModelIDMap = map[string]string{
+	"claude-instant-1.2":         "anthropic.claude-instant-v1",
+	"claude-2.0":                 "anthropic.claude-v2",
+	"claude-2.1":                 "anthropic.claude-v2:1",
+	"claude-3-sonnet-20240229":   "anthropic.claude-3-sonnet-20240229-v1:0",
+	"claude-3-5-sonnet-20240620": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+	"claude-3-opus-20240229":     "anthropic.claude-3-opus-20240229-v1:0",
+	"claude-3-haiku-20240307":    "anthropic.claude-3-haiku-20240307-v1:0",
 }
 
 func awsModelID(requestModel string) (string, error) {
-	if awsModelID, ok := awsModelIDMap[requestModel]; ok {
+	if awsModelID, ok := AwsModelIDMap[requestModel]; ok {
 		return awsModelID, nil
 	}
 
@@ -52,7 +46,7 @@ func awsModelID(requestModel string) (string, error) {
 func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
 	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "awsModelID")), nil
+		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}
 
 	awsReq := &bedrockruntime.InvokeModelInput{
@@ -63,30 +57,30 @@ func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*
 
 	claudeReq_, ok := c.Get(ctxkey.ConvertedRequest)
 	if !ok {
-		return wrapErr(errors.New("request not found")), nil
+		return utils.WrapErr(errors.New("request not found")), nil
 	}
 	claudeReq := claudeReq_.(*anthropic.Request)
 	awsClaudeReq := &Request{
 		AnthropicVersion: "bedrock-2023-05-31",
 	}
 	if err = copier.Copy(awsClaudeReq, claudeReq); err != nil {
-		return wrapErr(errors.Wrap(err, "copy request")), nil
+		return utils.WrapErr(errors.Wrap(err, "copy request")), nil
 	}
 
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "marshal request")), nil
+		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
 	}
 
 	awsResp, err := awsCli.InvokeModel(c.Request.Context(), awsReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "InvokeModel")), nil
+		return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil
 	}
 
 	claudeResponse := new(anthropic.Response)
 	err = json.Unmarshal(awsResp.Body, claudeResponse)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "unmarshal response")), nil
+		return utils.WrapErr(errors.Wrap(err, "unmarshal response")), nil
 	}
 
 	openaiResp := anthropic.ResponseClaude2OpenAI(claudeResponse)
@@ -106,7 +100,7 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 	createdTime := helper.GetTimestamp()
 	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "awsModelID")), nil
+		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
 	}
 
 	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
@@ -117,7 +111,7 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 
 	claudeReq_, ok := c.Get(ctxkey.ConvertedRequest)
 	if !ok {
-		return wrapErr(errors.New("request not found")), nil
+		return utils.WrapErr(errors.New("request not found")), nil
 	}
 	claudeReq := claudeReq_.(*anthropic.Request)
 
@@ -125,16 +119,16 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 		AnthropicVersion: "bedrock-2023-05-31",
 	}
 	if err = copier.Copy(awsClaudeReq, claudeReq); err != nil {
-		return wrapErr(errors.Wrap(err, "copy request")), nil
+		return utils.WrapErr(errors.Wrap(err, "copy request")), nil
 	}
 	awsReq.Body, err = json.Marshal(awsClaudeReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "marshal request")), nil
+		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
 	}
 
 	awsResp, err := awsCli.InvokeModelWithResponseStream(c.Request.Context(), awsReq)
 	if err != nil {
-		return wrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
+		return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
 	}
 	stream := awsResp.GetStream()
 	defer stream.Close()
@@ -142,6 +136,8 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 	c.Writer.Header().Set("Content-Type", "text/event-stream")
 	var usage relaymodel.Usage
 	var id string
+	var lastToolCallChoice openai.ChatCompletionsStreamResponseChoice
+
 	c.Stream(func(w io.Writer) bool {
 		event, ok := <-stream.Events()
 		if !ok {
@@ -162,8 +158,19 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 		if meta != nil {
 			usage.PromptTokens += meta.Usage.InputTokens
 			usage.CompletionTokens += meta.Usage.OutputTokens
-			id = fmt.Sprintf("chatcmpl-%s", meta.Id)
-			return true
+			if len(meta.Id) > 0 { // only message_start has an id, otherwise it's a finish_reason event.
+				id = fmt.Sprintf("chatcmpl-%s", meta.Id)
+				return true
+			} else { // finish_reason case
+				if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
+					lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function
+					if len(lastArgs.Arguments.(string)) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments.
+						lastArgs.Arguments = "{}"
+						response.Choices[len(response.Choices)-1].Delta.Content = nil
+						response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
+					}
+				}
+			}
 		}
 		if response == nil {
 			return true
@@ -171,6 +178,12 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 		response.Id = id
 		response.Model = c.GetString(ctxkey.OriginalModel)
 		response.Created = createdTime
+
+		for _, choice := range response.Choices {
+			if len(choice.Delta.ToolCalls) > 0 {
+				lastToolCallChoice = choice
+			}
+		}
 		jsonStr, err := json.Marshal(response)
 		if err != nil {
 			logger.SysError("error marshalling stream response: " + err.Error())

@@ -9,9 +9,12 @@ type Request struct {
 	// AnthropicVersion should be "bedrock-2023-05-31"
 	AnthropicVersion string              `json:"anthropic_version"`
 	Messages         []anthropic.Message `json:"messages"`
+	System           string              `json:"system,omitempty"`
 	MaxTokens        int                 `json:"max_tokens,omitempty"`
 	Temperature      float64             `json:"temperature,omitempty"`
 	TopP             float64             `json:"top_p,omitempty"`
 	TopK             int                 `json:"top_k,omitempty"`
 	StopSequences    []string            `json:"stop_sequences,omitempty"`
+	Tools            []anthropic.Tool    `json:"tools,omitempty"`
+	ToolChoice       any                 `json:"tool_choice,omitempty"`
 }

relay/adaptor/aws/llama3/adapter.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package aws

import (
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/songquanpeng/one-api/common/ctxkey"

	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

var _ utils.AwsAdapter = new(Adaptor)

type Adaptor struct {
}

func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}

	llamaReq := ConvertRequest(*request)
	c.Set(ctxkey.RequestModel, request.Model)
	c.Set(ctxkey.ConvertedRequest, llamaReq)
	return llamaReq, nil
}

func (a *Adaptor) DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
	if meta.IsStream {
		err, usage = StreamHandler(c, awsCli)
	} else {
		err, usage = Handler(c, awsCli, meta.ActualModelName)
	}
	return
}

relay/adaptor/aws/llama3/main.go (new file, 231 lines)
@@ -0,0 +1,231 @@
// Package aws provides the AWS adaptor for the relay service.
package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"text/template"

	"github.com/songquanpeng/one-api/common/ctxkey"
	"github.com/songquanpeng/one-api/common/random"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types"
	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/helper"
	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

// Only support llama-3-8b and llama-3-70b instruction models
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
var AwsModelIDMap = map[string]string{
	"llama3-8b-8192":  "meta.llama3-8b-instruct-v1:0",
	"llama3-70b-8192": "meta.llama3-70b-instruct-v1:0",
}

func awsModelID(requestModel string) (string, error) {
	if awsModelID, ok := AwsModelIDMap[requestModel]; ok {
		return awsModelID, nil
	}

	return "", errors.Errorf("model %s not found", requestModel)
}

// promptTemplate with range
const promptTemplate = `<|begin_of_text|>{{range .Messages}}<|start_header_id|>{{.Role}}<|end_header_id|>{{.StringContent}}<|eot_id|>{{end}}<|start_header_id|>assistant<|end_header_id|>
`

var promptTpl = template.Must(template.New("llama3-chat").Parse(promptTemplate))

func RenderPrompt(messages []relaymodel.Message) string {
	var buf bytes.Buffer
	err := promptTpl.Execute(&buf, struct{ Messages []relaymodel.Message }{messages})
	if err != nil {
		logger.SysError("error rendering prompt messages: " + err.Error())
	}
	return buf.String()
}

func ConvertRequest(textRequest relaymodel.GeneralOpenAIRequest) *Request {
	llamaRequest := Request{
		MaxGenLen:   textRequest.MaxTokens,
		Temperature: textRequest.Temperature,
		TopP:        textRequest.TopP,
	}
	if llamaRequest.MaxGenLen == 0 {
		llamaRequest.MaxGenLen = 2048
	}
	prompt := RenderPrompt(textRequest.Messages)
	llamaRequest.Prompt = prompt
	return &llamaRequest
}

func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}

	awsReq := &bedrockruntime.InvokeModelInput{
		ModelId:     aws.String(awsModelId),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

	llamaReq, ok := c.Get(ctxkey.ConvertedRequest)
	if !ok {
		return utils.WrapErr(errors.New("request not found")), nil
	}

	awsReq.Body, err = json.Marshal(llamaReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
	}

	awsResp, err := awsCli.InvokeModel(c.Request.Context(), awsReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil
	}

	var llamaResponse Response
	err = json.Unmarshal(awsResp.Body, &llamaResponse)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "unmarshal response")), nil
	}

	openaiResp := ResponseLlama2OpenAI(&llamaResponse)
	openaiResp.Model = modelName
	usage := relaymodel.Usage{
		PromptTokens:     llamaResponse.PromptTokenCount,
		CompletionTokens: llamaResponse.GenerationTokenCount,
		TotalTokens:      llamaResponse.PromptTokenCount + llamaResponse.GenerationTokenCount,
	}
	openaiResp.Usage = usage

	c.JSON(http.StatusOK, openaiResp)
	return nil, &usage
}

func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse {
	var responseText string
	if len(llamaResponse.Generation) > 0 {
		responseText = llamaResponse.Generation
	}
	choice := openai.TextResponseChoice{
		Index: 0,
		Message: relaymodel.Message{
			Role:    "assistant",
			Content: responseText,
			Name:    nil,
		},
		FinishReason: llamaResponse.StopReason,
	}
	fullTextResponse := openai.TextResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", random.GetUUID()),
		Object:  "chat.completion",
		Created: helper.GetTimestamp(),
		Choices: []openai.TextResponseChoice{choice},
	}
	return &fullTextResponse
}

func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	createdTime := helper.GetTimestamp()
	awsModelId, err := awsModelID(c.GetString(ctxkey.RequestModel))
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil
	}

	awsReq := &bedrockruntime.InvokeModelWithResponseStreamInput{
		ModelId:     aws.String(awsModelId),
		Accept:      aws.String("application/json"),
		ContentType: aws.String("application/json"),
	}

	llamaReq, ok := c.Get(ctxkey.ConvertedRequest)
	if !ok {
		return utils.WrapErr(errors.New("request not found")), nil
	}

	awsReq.Body, err = json.Marshal(llamaReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "marshal request")), nil
	}

	awsResp, err := awsCli.InvokeModelWithResponseStream(c.Request.Context(), awsReq)
	if err != nil {
		return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil
	}
	stream := awsResp.GetStream()
	defer stream.Close()

	c.Writer.Header().Set("Content-Type", "text/event-stream")
	var usage relaymodel.Usage
	c.Stream(func(w io.Writer) bool {
		event, ok := <-stream.Events()
		if !ok {
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}

		switch v := event.(type) {
		case *types.ResponseStreamMemberChunk:
			var llamaResp StreamResponse
			err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(&llamaResp)
			if err != nil {
				logger.SysError("error unmarshalling stream response: " + err.Error())
				return false
			}

			if llamaResp.PromptTokenCount > 0 {
				usage.PromptTokens = llamaResp.PromptTokenCount
			}
			if llamaResp.StopReason == "stop" {
				usage.CompletionTokens = llamaResp.GenerationTokenCount
				usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
			}
			response := StreamResponseLlama2OpenAI(&llamaResp)
			response.Id = fmt.Sprintf("chatcmpl-%s", random.GetUUID())
			response.Model = c.GetString(ctxkey.OriginalModel)
			response.Created = createdTime
			jsonStr, err := json.Marshal(response)
			if err != nil {
				logger.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
			return true
		case *types.UnknownUnionMember:
			fmt.Println("unknown tag:", v.Tag)
			return false
		default:
			fmt.Println("union is nil or unknown type")
			return false
		}
	})

	return nil, &usage
}

func StreamResponseLlama2OpenAI(llamaResponse *StreamResponse) *openai.ChatCompletionsStreamResponse {
	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Content = llamaResponse.Generation
	choice.Delta.Role = "assistant"
	finishReason := llamaResponse.StopReason
	if finishReason != "null" {
		choice.FinishReason = &finishReason
	}
	var openaiResponse openai.ChatCompletionsStreamResponse
	openaiResponse.Object = "chat.completion.chunk"
	openaiResponse.Choices = []openai.ChatCompletionsStreamResponseChoice{choice}
	return &openaiResponse
}

relay/adaptor/aws/llama3/main_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package aws_test

import (
	"testing"

	aws "github.com/songquanpeng/one-api/relay/adaptor/aws/llama3"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
	"github.com/stretchr/testify/assert"
)

func TestRenderPrompt(t *testing.T) {
	messages := []relaymodel.Message{
		{
			Role:    "user",
			Content: "What's your name?",
		},
	}
	prompt := aws.RenderPrompt(messages)
	expected := `<|begin_of_text|><|start_header_id|>user<|end_header_id|>What's your name?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
`
	assert.Equal(t, expected, prompt)

	messages = []relaymodel.Message{
		{
			Role:    "system",
			Content: "Your name is Kat. You are a detective.",
		},
		{
			Role:    "user",
			Content: "What's your name?",
		},
		{
			Role:    "assistant",
			Content: "Kat",
		},
		{
			Role:    "user",
			Content: "What's your job?",
		},
	}
	prompt = aws.RenderPrompt(messages)
	expected = `<|begin_of_text|><|start_header_id|>system<|end_header_id|>Your name is Kat. You are a detective.<|eot_id|><|start_header_id|>user<|end_header_id|>What's your name?<|eot_id|><|start_header_id|>assistant<|end_header_id|>Kat<|eot_id|><|start_header_id|>user<|end_header_id|>What's your job?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
`
	assert.Equal(t, expected, prompt)
}

relay/adaptor/aws/llama3/model.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package aws

// Request is the request to AWS Llama3
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Request struct {
	Prompt      string  `json:"prompt"`
	MaxGenLen   int     `json:"max_gen_len,omitempty"`
	Temperature float64 `json:"temperature,omitempty"`
	TopP        float64 `json:"top_p,omitempty"`
}

// Response is the response from AWS Llama3
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Response struct {
	Generation           string `json:"generation"`
	PromptTokenCount     int    `json:"prompt_token_count"`
	GenerationTokenCount int    `json:"generation_token_count"`
	StopReason           string `json:"stop_reason"`
}

// {'generation': 'Hi', 'prompt_token_count': 15, 'generation_token_count': 1, 'stop_reason': None}
type StreamResponse struct {
	Generation           string `json:"generation"`
	PromptTokenCount     int    `json:"prompt_token_count"`
	GenerationTokenCount int    `json:"generation_token_count"`
	StopReason           string `json:"stop_reason"`
}

relay/adaptor/aws/registry.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package aws

import (
	claude "github.com/songquanpeng/one-api/relay/adaptor/aws/claude"
	llama3 "github.com/songquanpeng/one-api/relay/adaptor/aws/llama3"
	"github.com/songquanpeng/one-api/relay/adaptor/aws/utils"
)

type AwsModelType int

const (
	AwsClaude AwsModelType = iota + 1
	AwsLlama3
)

var (
	adaptors = map[string]AwsModelType{}
)

func init() {
	for model := range claude.AwsModelIDMap {
		adaptors[model] = AwsClaude
	}
	for model := range llama3.AwsModelIDMap {
		adaptors[model] = AwsLlama3
	}
}

func GetAdaptor(model string) utils.AwsAdapter {
	adaptorType := adaptors[model]
	switch adaptorType {
	case AwsClaude:
		return &claude.Adaptor{}
	case AwsLlama3:
		return &llama3.Adaptor{}
	default:
		return nil
	}
}
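
Note: this registry is what lets the top-level aws Adaptor stay model-agnostic: its ConvertRequest (earlier in this commit) calls GetAdaptor(request.Model) and caches the result for DoResponse. An illustrative caller-side view, with model names taken from the two AwsModelIDMap tables above (since both sub-packages declare package aws, the claude/llama3 qualifiers assume import aliases as in registry.go):

    a := GetAdaptor("claude-3-5-sonnet-20240620") // -> *claude.Adaptor
    b := GetAdaptor("llama3-8b-8192")             // -> *llama3.Adaptor
    if GetAdaptor("gpt-4") == nil {
        // unmapped models yield nil, which the outer Adaptor surfaces as
        // "adaptor not found" / "awsAdapter is nil" errors
    }
    _, _ = a, b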

relay/adaptor/aws/utils/adaptor.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package utils

import (
	"errors"
	"io"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)

type AwsAdapter interface {
	ConvertRequest(c *gin.Context, relayMode int, request *model.GeneralOpenAIRequest) (any, error)
	DoResponse(c *gin.Context, awsCli *bedrockruntime.Client, meta *meta.Meta) (usage *model.Usage, err *model.ErrorWithStatusCode)
}

type Adaptor struct {
	Meta      *meta.Meta
	AwsClient *bedrockruntime.Client
}

func (a *Adaptor) Init(meta *meta.Meta) {
	a.Meta = meta
	a.AwsClient = bedrockruntime.New(bedrockruntime.Options{
		Region:      meta.Config.Region,
		Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(meta.Config.AK, meta.Config.SK, "")),
	})
}

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
	return "", nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
	return nil
}

func (a *Adaptor) ConvertImageRequest(request *model.ImageRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	return request, nil
}

func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
	return nil, nil
}

relay/adaptor/aws/utils/utils.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package utils

import (
	"net/http"

	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

func WrapErr(err error) *relaymodel.ErrorWithStatusCode {
	return &relaymodel.ErrorWithStatusCode{
		StatusCode: http.StatusInternalServerError,
		Error: relaymodel.Error{
			Message: err.Error(),
		},
	}
}
@@ -5,6 +5,13 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/songquanpeng/one-api/common/render"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/client"
@@ -12,11 +19,6 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
-	"strings"
-	"sync"
-	"time"
 )
 
 // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/flfmc9do2
@@ -137,59 +139,41 @@ func embeddingResponseBaidu2OpenAI(response *EmbeddingResponse) *openai.Embeddin
 func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
 	var usage model.Usage
 	scanner := bufio.NewScanner(resp.Body)
-	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
-		if atEOF && len(data) == 0 {
-			return 0, nil, nil
-		}
-		if i := strings.Index(string(data), "\n"); i >= 0 {
-			return i + 1, data[0:i], nil
-		}
-		if atEOF {
-			return len(data), data, nil
-		}
-		return 0, nil, nil
-	})
-	dataChan := make(chan string)
-	stopChan := make(chan bool)
-	go func() {
-		for scanner.Scan() {
-			data := scanner.Text()
-			if len(data) < 6 { // ignore blank line or wrong format
-				continue
-			}
-			data = data[6:]
-			dataChan <- data
-		}
-		stopChan <- true
-	}()
+	scanner.Split(bufio.ScanLines)
 
 	common.SetEventStreamHeaders(c)
-	c.Stream(func(w io.Writer) bool {
-		select {
-		case data := <-dataChan:
-			var baiduResponse ChatStreamResponse
-			err := json.Unmarshal([]byte(data), &baiduResponse)
-			if err != nil {
-				logger.SysError("error unmarshalling stream response: " + err.Error())
-				return true
-			}
-			if baiduResponse.Usage.TotalTokens != 0 {
-				usage.TotalTokens = baiduResponse.Usage.TotalTokens
-				usage.PromptTokens = baiduResponse.Usage.PromptTokens
-				usage.CompletionTokens = baiduResponse.Usage.TotalTokens - baiduResponse.Usage.PromptTokens
-			}
-			response := streamResponseBaidu2OpenAI(&baiduResponse)
-			jsonResponse, err := json.Marshal(response)
-			if err != nil {
-				logger.SysError("error marshalling stream response: " + err.Error())
-				return true
-			}
-			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
-			return true
-		case <-stopChan:
-			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
-			return false
-		}
-	})
+
+	for scanner.Scan() {
+		data := scanner.Text()
+		if len(data) < 6 {
+			continue
+		}
+		data = data[6:]
+
+		var baiduResponse ChatStreamResponse
+		err := json.Unmarshal([]byte(data), &baiduResponse)
+		if err != nil {
+			logger.SysError("error unmarshalling stream response: " + err.Error())
+			continue
+		}
+		if baiduResponse.Usage.TotalTokens != 0 {
+			usage.TotalTokens = baiduResponse.Usage.TotalTokens
+			usage.PromptTokens = baiduResponse.Usage.PromptTokens
+			usage.CompletionTokens = baiduResponse.Usage.TotalTokens - baiduResponse.Usage.PromptTokens
+		}
+		response := streamResponseBaidu2OpenAI(&baiduResponse)
+		err = render.ObjectData(c, response)
+		if err != nil {
+			logger.SysError(err.Error())
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		logger.SysError("error reading stream: " + err.Error())
+	}
+
+	render.Done(c)
+
 	err := resp.Body.Close()
 	if err != nil {
 		return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
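
Note: bufio.ScanLines is the stock line splitter and is a near-equivalent of the hand-rolled split function removed above (same newline-delimited tokens, final unterminated line returned at EOF), with one small difference: ScanLines also strips a trailing carriage return, which the custom closure did not.

    scanner := bufio.NewScanner(resp.Body)
    scanner.Split(bufio.ScanLines) // replaces the custom split func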

@@ -5,11 +5,13 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
+	"github.com/songquanpeng/one-api/relay/relaymode"
 )
 
 type Adaptor struct {
@@ -27,8 +29,33 @@ func (a *Adaptor) Init(meta *meta.Meta) {
 	a.meta = meta
 }
 
+// WorkerAI cannot be used across accounts with AIGateWay
+// https://developers.cloudflare.com/ai-gateway/providers/workersai/#openai-compatible-endpoints
+// https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/workers-ai
+func (a *Adaptor) isAIGateWay(baseURL string) bool {
+	return strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") && strings.HasSuffix(baseURL, "/workers-ai")
+}
+
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	return fmt.Sprintf("%s/client/v4/accounts/%s/ai/run/%s", meta.BaseURL, meta.Config.UserID, meta.ActualModelName), nil
+	isAIGateWay := a.isAIGateWay(meta.BaseURL)
+	var urlPrefix string
+	if isAIGateWay {
+		urlPrefix = meta.BaseURL
+	} else {
+		urlPrefix = fmt.Sprintf("%s/client/v4/accounts/%s/ai", meta.BaseURL, meta.Config.UserID)
+	}
+
+	switch meta.Mode {
+	case relaymode.ChatCompletions:
+		return fmt.Sprintf("%s/v1/chat/completions", urlPrefix), nil
+	case relaymode.Embeddings:
+		return fmt.Sprintf("%s/v1/embeddings", urlPrefix), nil
+	default:
+		if isAIGateWay {
+			return fmt.Sprintf("%s/%s", urlPrefix, meta.ActualModelName), nil
+		}
+		return fmt.Sprintf("%s/run/%s", urlPrefix, meta.ActualModelName), nil
+	}
 }
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *me
|
||||
@@ -41,7 +68,14 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
|
||||
if request == nil {
|
||||
return nil, errors.New("request is nil")
|
||||
}
|
||||
return ConvertRequest(*request), nil
|
||||
switch relayMode {
|
||||
case relaymode.Completions:
|
||||
return ConvertCompletionsRequest(*request), nil
|
||||
case relaymode.ChatCompletions, relaymode.Embeddings:
|
||||
return request, nil
|
||||
default:
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Adaptor) DoRequest(c *gin.Context, meta *meta.Meta, requestBody io.Reader) (*http.Response, error) {
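For reference, the two URL shapes the rewritten GetRequestURL produces for chat completions; the account ID and gateway base URL below are made-up values for illustration:

package main

import (
	"fmt"
	"strings"
)

func chatURL(baseURL, accountID string) string {
	isGateway := strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") &&
		strings.HasSuffix(baseURL, "/workers-ai")
	prefix := baseURL // the gateway base URL is used as-is
	if !isGateway {
		prefix = fmt.Sprintf("%s/client/v4/accounts/%s/ai", baseURL, accountID)
	}
	return fmt.Sprintf("%s/v1/chat/completions", prefix)
}

func main() {
	// direct Workers AI call (accountID "acct123" is made up)
	fmt.Println(chatURL("https://api.cloudflare.com", "acct123"))
	// via AI Gateway: account and gateway IDs live in the base URL itself
	fmt.Println(chatURL("https://gateway.ai.cloudflare.com/v1/acct123/gw1/workers-ai", "acct123"))
}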

@@ -2,12 +2,14 @@ package cloudflare

import (
"bufio"
"bytes"
"encoding/json"
"io"
"net/http"
"strings"

"github.com/songquanpeng/one-api/common/ctxkey"
"github.com/songquanpeng/one-api/common/render"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/helper"
@@ -16,114 +18,66 @@ import (
"github.com/songquanpeng/one-api/relay/model"
)

func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
var promptBuilder strings.Builder
for _, message := range textRequest.Messages {
promptBuilder.WriteString(message.StringContent())
promptBuilder.WriteString("\n") // add a newline to separate each message
}

return &Request{
MaxTokens: textRequest.MaxTokens,
Prompt: promptBuilder.String(),
Stream: textRequest.Stream,
Temperature: textRequest.Temperature,
}
}

func ResponseCloudflare2OpenAI(cloudflareResponse *Response) *openai.TextResponse {
choice := openai.TextResponseChoice{
Index: 0,
Message: model.Message{
Role: "assistant",
Content: cloudflareResponse.Result.Response,
},
FinishReason: "stop",
func ConvertCompletionsRequest(textRequest model.GeneralOpenAIRequest) *Request {
p, _ := textRequest.Prompt.(string)
return &Request{
Prompt: p,
MaxTokens: textRequest.MaxTokens,
Stream: textRequest.Stream,
Temperature: textRequest.Temperature,
}
fullTextResponse := openai.TextResponse{
Object: "chat.completion",
Created: helper.GetTimestamp(),
Choices: []openai.TextResponseChoice{choice},
}
return &fullTextResponse
}

func StreamResponseCloudflare2OpenAI(cloudflareResponse *StreamResponse) *openai.ChatCompletionsStreamResponse {
var choice openai.ChatCompletionsStreamResponseChoice
choice.Delta.Content = cloudflareResponse.Response
choice.Delta.Role = "assistant"
openaiResponse := openai.ChatCompletionsStreamResponse{
Object: "chat.completion.chunk",
Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
Created: helper.GetTimestamp(),
}
return &openaiResponse
}

func StreamHandler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
scanner.Split(bufio.ScanLines)

dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < len("data: ") {
continue
}
data = strings.TrimPrefix(data, "data: ")
dataChan <- data
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
id := helper.GetResponseID(c)
responseModel := c.GetString("original_model")
responseModel := c.GetString(ctxkey.OriginalModel)
var responseText string
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
var cloudflareResponse StreamResponse
err := json.Unmarshal([]byte(data), &cloudflareResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response := StreamResponseCloudflare2OpenAI(&cloudflareResponse)
if response == nil {
return true
}
responseText += cloudflareResponse.Response
response.Id = id
response.Model = responseModel
jsonStr, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := scanner.Text()
if len(data) < len("data: ") {
continue
}
})
_ = resp.Body.Close()
data = strings.TrimPrefix(data, "data: ")
data = strings.TrimSuffix(data, "\r")

if data == "[DONE]" {
break
}

var response openai.ChatCompletionsStreamResponse
err := json.Unmarshal([]byte(data), &response)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
for _, v := range response.Choices {
v.Delta.Role = "assistant"
responseText += v.Delta.StringContent()
}
response.Id = id
response.Model = modelName
err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

usage := openai.ResponseText2Usage(responseText, responseModel, promptTokens)
return nil, usage
}

@@ -137,22 +91,25 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var cloudflareResponse Response
err = json.Unmarshal(responseBody, &cloudflareResponse)
var response openai.TextResponse
err = json.Unmarshal(responseBody, &response)
if err != nil {
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
fullTextResponse := ResponseCloudflare2OpenAI(&cloudflareResponse)
fullTextResponse.Model = modelName
usage := openai.ResponseText2Usage(cloudflareResponse.Result.Response, modelName, promptTokens)
fullTextResponse.Usage = *usage
fullTextResponse.Id = helper.GetResponseID(c)
jsonResponse, err := json.Marshal(fullTextResponse)
response.Model = modelName
var responseText string
for _, v := range response.Choices {
responseText += v.Message.Content.(string)
}
usage := openai.ResponseText2Usage(responseText, modelName, promptTokens)
response.Usage = *usage
response.Id = helper.GetResponseID(c)
jsonResponse, err := json.Marshal(response)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
_, err = c.Writer.Write(jsonResponse)
_, _ = c.Writer.Write(jsonResponse)
return nil, usage
}

@@ -1,25 +1,13 @@
package cloudflare

import "github.com/songquanpeng/one-api/relay/model"

type Request struct {
Lora string `json:"lora,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Prompt string `json:"prompt,omitempty"`
Raw bool `json:"raw,omitempty"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
}

type Result struct {
Response string `json:"response"`
}

type Response struct {
Result Result `json:"result"`
Success bool `json:"success"`
Errors []string `json:"errors"`
Messages []string `json:"messages"`
}

type StreamResponse struct {
Response string `json:"response"`
Messages []model.Message `json:"messages,omitempty"`
Lora string `json:"lora,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Prompt string `json:"prompt,omitempty"`
Raw bool `json:"raw,omitempty"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
}

@@ -2,9 +2,9 @@ package cohere

import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strings"
@@ -134,66 +134,53 @@ func ResponseCohere2OpenAI(cohereResponse *Response) *openai.TextResponse {
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
createdTime := helper.GetTimestamp()
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
scanner.Split(bufio.ScanLines)

dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
dataChan <- data
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
var usage model.Usage
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
var cohereResponse StreamResponse
err := json.Unmarshal([]byte(data), &cohereResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response, meta := StreamResponseCohere2OpenAI(&cohereResponse)
if meta != nil {
usage.PromptTokens += meta.Meta.Tokens.InputTokens
usage.CompletionTokens += meta.Meta.Tokens.OutputTokens
return true
}
if response == nil {
return true
}
response.Id = fmt.Sprintf("chatcmpl-%d", createdTime)
response.Model = c.GetString("original_model")
response.Created = createdTime
jsonStr, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := scanner.Text()
data = strings.TrimSuffix(data, "\r")

var cohereResponse StreamResponse
err := json.Unmarshal([]byte(data), &cohereResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
})
_ = resp.Body.Close()

response, meta := StreamResponseCohere2OpenAI(&cohereResponse)
if meta != nil {
usage.PromptTokens += meta.Meta.Tokens.InputTokens
usage.CompletionTokens += meta.Meta.Tokens.OutputTokens
continue
}
if response == nil {
continue
}

response.Id = fmt.Sprintf("chatcmpl-%d", createdTime)
response.Model = c.GetString("original_model")
response.Created = createdTime

err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

return nil, &usage
}

@@ -4,6 +4,11 @@ import (
"bufio"
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strings"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/conv"
@@ -12,9 +17,6 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/coze/constant/messagetype"
"github.com/songquanpeng/one-api/relay/adaptor/openai"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strings"
)

// https://www.coze.com/open
@@ -109,69 +111,54 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
var responseText string
createdTime := helper.GetTimestamp()
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < 5 {
continue
}
if !strings.HasPrefix(data, "data:") {
continue
}
data = strings.TrimPrefix(data, "data:")
dataChan <- data
}
stopChan <- true
}()
scanner.Split(bufio.ScanLines)

common.SetEventStreamHeaders(c)
var modelName string
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
var cozeResponse StreamResponse
err := json.Unmarshal([]byte(data), &cozeResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response, _ := StreamResponseCoze2OpenAI(&cozeResponse)
if response == nil {
return true
}
for _, choice := range response.Choices {
responseText += conv.AsString(choice.Delta.Content)
}
response.Model = modelName
response.Created = createdTime
jsonStr, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := scanner.Text()
if len(data) < 5 || !strings.HasPrefix(data, "data:") {
continue
}
})
_ = resp.Body.Close()
data = strings.TrimPrefix(data, "data:")
data = strings.TrimSuffix(data, "\r")

var cozeResponse StreamResponse
err := json.Unmarshal([]byte(data), &cozeResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}

response, _ := StreamResponseCoze2OpenAI(&cozeResponse)
if response == nil {
continue
}

for _, choice := range response.Choices {
responseText += conv.AsString(choice.Delta.Content)
}
response.Model = modelName
response.Created = createdTime

err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

return nil, &responseText
}

@@ -4,6 +4,7 @@ import (
"bufio"
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strings"
@@ -275,64 +276,50 @@ func embeddingResponseGemini2OpenAI(response *EmbeddingResponse) *openai.Embeddi
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
responseText := ""
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
data = strings.TrimSpace(data)
if !strings.HasPrefix(data, "data: ") {
continue
}
data = strings.TrimPrefix(data, "data: ")
data = strings.TrimSuffix(data, "\"")
dataChan <- data
}
stopChan <- true
}()
scanner.Split(bufio.ScanLines)

common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
var geminiResponse ChatResponse
err := json.Unmarshal([]byte(data), &geminiResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response := streamResponseGeminiChat2OpenAI(&geminiResponse)
if response == nil {
return true
}
responseText += response.Choices[0].Delta.StringContent()
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := scanner.Text()
data = strings.TrimSpace(data)
if !strings.HasPrefix(data, "data: ") {
continue
}
})
data = strings.TrimPrefix(data, "data: ")
data = strings.TrimSuffix(data, "\"")

var geminiResponse ChatResponse
err := json.Unmarshal([]byte(data), &geminiResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}

response := streamResponseGeminiChat2OpenAI(&geminiResponse)
if response == nil {
continue
}

responseText += response.Choices[0].Delta.StringContent()

err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}

return nil, responseText
}

relay/adaptor/novita/constants.go (new file)
@@ -0,0 +1,19 @@
package novita

// https://novita.ai/llm-api

var ModelList = []string{
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3-70b-instruct",
"nousresearch/hermes-2-pro-llama-3-8b",
"nousresearch/nous-hermes-llama2-13b",
"mistralai/mistral-7b-instruct",
"cognitivecomputations/dolphin-mixtral-8x22b",
"sao10k/l3-70b-euryale-v2.1",
"sophosympatheia/midnight-rose-70b",
"gryphe/mythomax-l2-13b",
"Nous-Hermes-2-Mixtral-8x7B-DPO",
"lzlv_70b",
"teknium/openhermes-2.5-mistral-7b",
"microsoft/wizardlm-2-8x22b",
}
relay/adaptor/novita/main.go (new file)
@@ -0,0 +1,15 @@
package novita

import (
"fmt"

"github.com/songquanpeng/one-api/relay/meta"
"github.com/songquanpeng/one-api/relay/relaymode"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
if meta.Mode == relaymode.ChatCompletions {
return fmt.Sprintf("%s/chat/completions", meta.BaseURL), nil
}
return "", fmt.Errorf("unsupported relay mode %d for novita", meta.Mode)
}
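A quick sketch of how the new helper composes the request URL; the base URL is the default registered for channel 41 later in this diff, and the constant stands in for relaymode.ChatCompletions:

package main

import (
	"errors"
	"fmt"
)

const chatCompletions = 1 // stand-in for relaymode.ChatCompletions

func novitaURL(baseURL string, mode int) (string, error) {
	if mode == chatCompletions {
		return fmt.Sprintf("%s/chat/completions", baseURL), nil
	}
	return "", errors.New("unsupported relay mode for novita")
}

func main() {
	u, err := novitaURL("https://api.novita.ai/v3/openai", chatCompletions)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // https://api.novita.ai/v3/openai/chat/completions
}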

@@ -5,12 +5,14 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/random"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strings"

"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/random"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/image"
@@ -105,54 +107,51 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
return 0, nil, nil
}
if i := strings.Index(string(data), "}\n"); i >= 0 {
return i + 2, data[0:i], nil
return i + 2, data[0 : i+1], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := strings.TrimPrefix(scanner.Text(), "}")
dataChan <- data + "}"
}
stopChan <- true
}()

common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
var ollamaResponse ChatResponse
err := json.Unmarshal([]byte(data), &ollamaResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
if ollamaResponse.EvalCount != 0 {
usage.PromptTokens = ollamaResponse.PromptEvalCount
usage.CompletionTokens = ollamaResponse.EvalCount
usage.TotalTokens = ollamaResponse.PromptEvalCount + ollamaResponse.EvalCount
}
response := streamResponseOllama2OpenAI(&ollamaResponse)
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := strings.TrimPrefix(scanner.Text(), "}")
data = data + "}"

var ollamaResponse ChatResponse
err := json.Unmarshal([]byte(data), &ollamaResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
})

if ollamaResponse.EvalCount != 0 {
usage.PromptTokens = ollamaResponse.PromptEvalCount
usage.CompletionTokens = ollamaResponse.EvalCount
usage.TotalTokens = ollamaResponse.PromptEvalCount + ollamaResponse.EvalCount
}

response := streamResponseOllama2OpenAI(&ollamaResponse)
err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

return nil, &usage
}
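The one-character fix in the split function above (data[0:i] to data[0 : i+1]) keeps the closing brace inside each token when the Ollama NDJSON stream is cut at "}\n" boundaries. A standalone sketch of that SplitFunc:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// splitJSONObjects cuts the stream at "}\n" boundaries; returning
// data[0 : i+1] keeps the '}' inside the token so it remains valid JSON.
func splitJSONObjects(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := strings.Index(string(data), "}\n"); i >= 0 {
		return i + 2, data[0 : i+1], nil
	}
	if atEOF {
		return len(data), data, nil
	}
	return 0, nil, nil // ask the scanner for more input
}

func main() {
	s := bufio.NewScanner(strings.NewReader("{\"a\":1}\n{\"b\":2}\n"))
	s.Split(splitJSONObjects)
	for s.Scan() {
		fmt.Println(s.Text()) // {"a":1}, then {"b":2}
	}
}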

@@ -3,17 +3,19 @@ package openai
import (
"errors"
"fmt"
"io"
"net/http"
"strings"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/adaptor"
"github.com/songquanpeng/one-api/relay/adaptor/doubao"
"github.com/songquanpeng/one-api/relay/adaptor/minimax"
"github.com/songquanpeng/one-api/relay/adaptor/novita"
"github.com/songquanpeng/one-api/relay/channeltype"
"github.com/songquanpeng/one-api/relay/meta"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/relaymode"
"io"
"net/http"
"strings"
)

type Adaptor struct {
@@ -48,6 +50,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
return minimax.GetRequestURL(meta)
case channeltype.Doubao:
return doubao.GetRequestURL(meta)
case channeltype.Novita:
return novita.GetRequestURL(meta)
default:
return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
}

@@ -10,6 +10,7 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/minimax"
"github.com/songquanpeng/one-api/relay/adaptor/mistral"
"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
"github.com/songquanpeng/one-api/relay/adaptor/novita"
"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
"github.com/songquanpeng/one-api/relay/channeltype"
@@ -28,6 +29,7 @@ var CompatibleChannels = []int{
channeltype.StepFun,
channeltype.DeepSeek,
channeltype.TogetherAI,
channeltype.Novita,
}

func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -56,6 +58,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
return "together.ai", togetherai.ModelList
case channeltype.Doubao:
return "doubao", doubao.ModelList
case channeltype.Novita:
return "novita", novita.ModelList
default:
return "openai", ModelList
}

@@ -4,15 +4,18 @@ import (
"bufio"
"bytes"
"encoding/json"
"io"
"net/http"
"strings"

"github.com/songquanpeng/one-api/common/render"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/conv"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/relay/model"
"github.com/songquanpeng/one-api/relay/relaymode"
"io"
"net/http"
"strings"
)

const (
@@ -24,88 +27,72 @@ const (
func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
responseText := ""
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
scanner.Split(bufio.ScanLines)
var usage *model.Usage
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < dataPrefixLength { // ignore blank line or wrong format
continue
}
if data[:dataPrefixLength] != dataPrefix && data[:dataPrefixLength] != done {
continue
}
if strings.HasPrefix(data[dataPrefixLength:], done) {
dataChan <- data
continue
}
switch relayMode {
case relaymode.ChatCompletions:
var streamResponse ChatCompletionsStreamResponse
err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
dataChan <- data // if error happened, pass the data to client
continue // just ignore the error
}
if len(streamResponse.Choices) == 0 {
// but for empty choice, we should not pass it to client, this is for azure
continue // just ignore empty choice
}
dataChan <- data
for _, choice := range streamResponse.Choices {
responseText += conv.AsString(choice.Delta.Content)
}
if streamResponse.Usage != nil {
usage = streamResponse.Usage
}
case relaymode.Completions:
dataChan <- data
var streamResponse CompletionsStreamResponse
err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
for _, choice := range streamResponse.Choices {
responseText += choice.Text
}
}
}
stopChan <- true
}()

common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
if strings.HasPrefix(data, "data: [DONE]") {
data = data[:12]
}
// some implementations may add \r at the end of data
data = strings.TrimSuffix(data, "\r")
c.Render(-1, common.CustomEvent{Data: data})
return true
case <-stopChan:
return false

doneRendered := false
for scanner.Scan() {
data := scanner.Text()
if len(data) < dataPrefixLength { // ignore blank line or wrong format
continue
}
})
if data[:dataPrefixLength] != dataPrefix && data[:dataPrefixLength] != done {
continue
}
if strings.HasPrefix(data[dataPrefixLength:], done) {
render.StringData(c, data)
doneRendered = true
continue
}
switch relayMode {
case relaymode.ChatCompletions:
var streamResponse ChatCompletionsStreamResponse
err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
render.StringData(c, data) // if error happened, pass the data to client
continue // just ignore the error
}
if len(streamResponse.Choices) == 0 {
// but for empty choice, we should not pass it to client, this is for azure
continue // just ignore empty choice
}
render.StringData(c, data)
for _, choice := range streamResponse.Choices {
responseText += conv.AsString(choice.Delta.Content)
}
if streamResponse.Usage != nil {
usage = streamResponse.Usage
}
case relaymode.Completions:
render.StringData(c, data)
var streamResponse CompletionsStreamResponse
err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
for _, choice := range streamResponse.Choices {
responseText += choice.Text
}
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

if !doneRendered {
render.Done(c)
}

err := resp.Body.Close()
if err != nil {
return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "", nil
}

return nil, responseText, usage
}
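The new doneRendered flag prevents a duplicated terminator: [DONE] is forwarded verbatim when the upstream sends it and synthesized only when it does not. A distilled sketch of that control flow:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func forward(upstream string) string {
	var out strings.Builder
	doneRendered := false
	s := bufio.NewScanner(strings.NewReader(upstream))
	for s.Scan() {
		line := s.Text()
		if !strings.HasPrefix(line, "data:") {
			continue
		}
		out.WriteString(line + "\n\n") // stands in for render.StringData
		if strings.HasPrefix(strings.TrimSpace(line[len("data:"):]), "[DONE]") {
			doneRendered = true
		}
	}
	if !doneRendered {
		out.WriteString("data: [DONE]\n\n") // synthesize only when missing
	}
	return out.String()
}

func main() {
	fmt.Print(forward("data: {\"id\":1}\n"))               // [DONE] appended
	fmt.Print(forward("data: {\"id\":1}\ndata: [DONE]\n")) // forwarded as-is
}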

@@ -149,7 +136,7 @@ func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName st
return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

if textResponse.Usage.TotalTokens == 0 {
if textResponse.Usage.TotalTokens == 0 || (textResponse.Usage.PromptTokens == 0 && textResponse.Usage.CompletionTokens == 0) {
completionTokens := 0
for _, choice := range textResponse.Choices {
completionTokens += CountTokenText(choice.Message.StringContent(), modelName)

@@ -3,6 +3,10 @@ package palm
import (
"encoding/json"
"fmt"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/helper"
@@ -11,8 +15,6 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
)

// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
@@ -77,58 +79,51 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
responseText := ""
responseId := fmt.Sprintf("chatcmpl-%s", random.GetUUID())
createdTime := helper.GetTimestamp()
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
logger.SysError("error reading stream response: " + err.Error())
stopChan <- true
return
}
err = resp.Body.Close()
if err != nil {
logger.SysError("error closing stream response: " + err.Error())
stopChan <- true
return
}
var palmResponse ChatResponse
err = json.Unmarshal(responseBody, &palmResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
stopChan <- true
return
}
fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse)
fullTextResponse.Id = responseId
fullTextResponse.Created = createdTime
if len(palmResponse.Candidates) > 0 {
responseText = palmResponse.Candidates[0].Content
}
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
stopChan <- true
return
}
dataChan <- string(jsonResponse)
stopChan <- true
}()

common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
c.Render(-1, common.CustomEvent{Data: "data: " + data})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

responseBody, err := io.ReadAll(resp.Body)
if err != nil {
logger.SysError("error reading stream response: " + err.Error())
err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
})
err := resp.Body.Close()
return openai.ErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), ""
}

err = resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}

var palmResponse ChatResponse
err = json.Unmarshal(responseBody, &palmResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), ""
}

fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse)
fullTextResponse.Id = responseId
fullTextResponse.Created = createdTime
if len(palmResponse.Candidates) > 0 {
responseText = palmResponse.Candidates[0].Content
}

jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), ""
}

err = render.ObjectData(c, string(jsonResponse))
if err != nil {
logger.SysError(err.Error())
}

render.Done(c)

return nil, responseText
}

@@ -8,6 +8,13 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/conv"
@@ -17,11 +24,6 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strconv"
"strings"
"time"
)

func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
@@ -87,64 +89,46 @@ func streamResponseTencent2OpenAI(TencentResponse *ChatResponse) *openai.ChatCom
func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, string) {
var responseText string
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
dataChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
if len(data) < 5 { // ignore blank line or wrong format
continue
}
if data[:5] != "data:" {
continue
}
data = data[5:]
dataChan <- data
}
stopChan <- true
}()
scanner.Split(bufio.ScanLines)

common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
var TencentResponse ChatResponse
err := json.Unmarshal([]byte(data), &TencentResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response := streamResponseTencent2OpenAI(&TencentResponse)
if len(response.Choices) != 0 {
responseText += conv.AsString(response.Choices[0].Delta.Content)
}
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false

for scanner.Scan() {
data := scanner.Text()
if len(data) < 5 || !strings.HasPrefix(data, "data:") {
continue
}
})
data = strings.TrimPrefix(data, "data:")

var tencentResponse ChatResponse
err := json.Unmarshal([]byte(data), &tencentResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}

response := streamResponseTencent2OpenAI(&tencentResponse)
if len(response.Choices) != 0 {
responseText += conv.AsString(response.Choices[0].Delta.Content)
}

err = render.ObjectData(c, response)
if err != nil {
logger.SysError(err.Error())
}
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}

return nil, responseText
}

@@ -6,4 +6,5 @@ var ModelList = []string{
"SparkDesk-v2.1",
"SparkDesk-v3.1",
"SparkDesk-v3.5",
"SparkDesk-v4.0",
}

@@ -44,7 +44,7 @@ func requestOpenAI2Xunfei(request model.GeneralOpenAIRequest, xunfeiAppId string
xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens
xunfeiRequest.Payload.Message.Text = messages

if strings.HasPrefix(domain, "generalv3") {
if strings.HasPrefix(domain, "generalv3") || domain == "4.0Ultra" {
functions := make([]model.Function, len(request.Tools))
for i, tool := range request.Tools {
functions[i] = tool.Function
@@ -290,6 +290,8 @@ func apiVersion2domain(apiVersion string) string {
return "generalv3"
case "v3.5":
return "generalv3.5"
case "v4.0":
return "4.0Ultra"
}
return "general" + apiVersion
}

@@ -3,6 +3,13 @@ package zhipu
import (
"bufio"
"encoding/json"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
"strings"
"sync"
"time"

"github.com/gin-gonic/gin"
"github.com/golang-jwt/jwt"
"github.com/songquanpeng/one-api/common"
@@ -11,11 +18,6 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/openai"
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"strings"
"sync"
"time"
)

// https://open.bigmodel.cn/doc/api#chatglm_std
@@ -155,66 +157,55 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
}
return 0, nil, nil
})
dataChan := make(chan string)
metaChan := make(chan string)
stopChan := make(chan bool)
go func() {
for scanner.Scan() {
data := scanner.Text()
lines := strings.Split(data, "\n")
for i, line := range lines {
if len(line) < 5 {

common.SetEventStreamHeaders(c)

for scanner.Scan() {
data := scanner.Text()
lines := strings.Split(data, "\n")
for i, line := range lines {
if len(line) < 5 {
continue
}
if strings.HasPrefix(line, "data:") {
dataSegment := line[5:]
if i != len(lines)-1 {
dataSegment += "\n"
}
response := streamResponseZhipu2OpenAI(dataSegment)
err := render.ObjectData(c, response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
}
} else if strings.HasPrefix(line, "meta:") {
metaSegment := line[5:]
var zhipuResponse StreamMetaResponse
err := json.Unmarshal([]byte(metaSegment), &zhipuResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
continue
}
if line[:5] == "data:" {
dataChan <- line[5:]
if i != len(lines)-1 {
dataChan <- "\n"
}
} else if line[:5] == "meta:" {
metaChan <- line[5:]
response, zhipuUsage := streamMetaResponseZhipu2OpenAI(&zhipuResponse)
err = render.ObjectData(c, response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
}
usage = zhipuUsage
}
}
stopChan <- true
}()
common.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
response := streamResponseZhipu2OpenAI(data)
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case data := <-metaChan:
var zhipuResponse StreamMetaResponse
err := json.Unmarshal([]byte(data), &zhipuResponse)
if err != nil {
logger.SysError("error unmarshalling stream response: " + err.Error())
return true
}
response, zhipuUsage := streamMetaResponseZhipu2OpenAI(&zhipuResponse)
jsonResponse, err := json.Marshal(response)
if err != nil {
logger.SysError("error marshalling stream response: " + err.Error())
return true
}
usage = zhipuUsage
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false
}
})
}

if err := scanner.Err(); err != nil {
logger.SysError("error reading stream: " + err.Error())
}

render.Done(c)

err := resp.Body.Close()
if err != nil {
return openai.ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}

return nil, usage
}

@@ -2,6 +2,7 @@ package ratio

import (
"encoding/json"
"fmt"
"strings"

"github.com/songquanpeng/one-api/common/logger"
@@ -70,12 +71,13 @@ var ModelRatio = map[string]float64{
"dall-e-2": 0.02 * USD, // $0.016 - $0.020 / image
"dall-e-3": 0.04 * USD, // $0.040 - $0.120 / image
// https://www.anthropic.com/api#pricing
"claude-instant-1.2": 0.8 / 1000 * USD,
"claude-2.0": 8.0 / 1000 * USD,
"claude-2.1": 8.0 / 1000 * USD,
"claude-3-haiku-20240307": 0.25 / 1000 * USD,
"claude-3-sonnet-20240229": 3.0 / 1000 * USD,
"claude-3-opus-20240229": 15.0 / 1000 * USD,
"claude-instant-1.2": 0.8 / 1000 * USD,
"claude-2.0": 8.0 / 1000 * USD,
"claude-2.1": 8.0 / 1000 * USD,
"claude-3-haiku-20240307": 0.25 / 1000 * USD,
"claude-3-sonnet-20240229": 3.0 / 1000 * USD,
"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD,
"claude-3-opus-20240229": 15.0 / 1000 * USD,
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
"ERNIE-4.0-8K": 0.120 * RMB,
"ERNIE-3.5-8K": 0.012 * RMB,
@@ -124,6 +126,7 @@ var ModelRatio = map[string]float64{
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v4.0": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
@@ -167,6 +170,9 @@ var ModelRatio = map[string]float64{
"step-1v-32k": 0.024 * RMB,
"step-1-32k": 0.024 * RMB,
"step-1-200k": 0.15 * RMB,
// aws llama3 https://aws.amazon.com/cn/bedrock/pricing/
"llama3-8b-8192(33)": 0.0003 / 0.002, // $0.0003 / 1K tokens
"llama3-70b-8192(33)": 0.00265 / 0.002, // $0.00265 / 1K tokens
// https://cohere.com/pricing
"command": 0.5,
"command-nightly": 0.5,
@@ -183,7 +189,11 @@ var ModelRatio = map[string]float64{
"deepl-ja": 25.0 / 1000 * USD,
}

var CompletionRatio = map[string]float64{}
var CompletionRatio = map[string]float64{
// aws llama3
"llama3-8b-8192(33)": 0.0006 / 0.0003,
"llama3-70b-8192(33)": 0.0035 / 0.00265,
}

var DefaultModelRatio map[string]float64
var DefaultCompletionRatio map[string]float64
@@ -232,22 +242,28 @@ func UpdateModelRatioByJSONString(jsonStr string) error {
return json.Unmarshal([]byte(jsonStr), &ModelRatio)
}

func GetModelRatio(name string) float64 {
func GetModelRatio(name string, channelType int) float64 {
if strings.HasPrefix(name, "qwen-") && strings.HasSuffix(name, "-internet") {
name = strings.TrimSuffix(name, "-internet")
}
if strings.HasPrefix(name, "command-") && strings.HasSuffix(name, "-internet") {
name = strings.TrimSuffix(name, "-internet")
}
ratio, ok := ModelRatio[name]
if !ok {
ratio, ok = DefaultModelRatio[name]
model := fmt.Sprintf("%s(%d)", name, channelType)
if ratio, ok := ModelRatio[model]; ok {
return ratio
}
if !ok {
logger.SysError("model ratio not found: " + name)
return 30
if ratio, ok := DefaultModelRatio[model]; ok {
return ratio
}
return ratio
if ratio, ok := ModelRatio[name]; ok {
return ratio
}
if ratio, ok := DefaultModelRatio[name]; ok {
return ratio
}
logger.SysError("model ratio not found: " + name)
return 30
}
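GetModelRatio now checks a channel-qualified key of the form name(channelType) before falling back to the bare model name, which is how the AWS-specific llama3 entries above take effect. A compact sketch of the lookup order (the map contents are illustrative, and the bare-name entry is hypothetical):

package main

import "fmt"

var modelRatio = map[string]float64{
	"llama3-8b-8192(33)": 0.0003 / 0.002, // channel-qualified: AWS, channel type 33
	"llama3-8b-8192":     0.0002 / 0.002, // hypothetical bare-name fallback entry
}

func getModelRatio(name string, channelType int) float64 {
	if ratio, ok := modelRatio[fmt.Sprintf("%s(%d)", name, channelType)]; ok {
		return ratio // channel-specific price wins
	}
	if ratio, ok := modelRatio[name]; ok {
		return ratio
	}
	return 30 // same default as the diff
}

func main() {
	fmt.Println(getModelRatio("llama3-8b-8192", 33)) // 0.15, channel-qualified hit
	fmt.Println(getModelRatio("llama3-8b-8192", 1))  // 0.1, bare-name fallback
	fmt.Println(getModelRatio("no-such-model", 1))   // 30, default
}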
|
||||
|
||||
func CompletionRatio2JSONString() string {
|
||||
@@ -263,7 +279,17 @@ func UpdateCompletionRatioByJSONString(jsonStr string) error {
|
||||
return json.Unmarshal([]byte(jsonStr), &CompletionRatio)
|
||||
}
|
||||
|
||||
func GetCompletionRatio(name string) float64 {
|
||||
func GetCompletionRatio(name string, channelType int) float64 {
|
||||
if strings.HasPrefix(name, "qwen-") && strings.HasSuffix(name, "-internet") {
|
||||
name = strings.TrimSuffix(name, "-internet")
|
||||
}
|
||||
model := fmt.Sprintf("%s(%d)", name, channelType)
|
||||
if ratio, ok := CompletionRatio[model]; ok {
|
||||
return ratio
|
||||
}
|
||||
if ratio, ok := DefaultCompletionRatio[model]; ok {
|
||||
return ratio
|
||||
}
|
||||
if ratio, ok := CompletionRatio[name]; ok {
|
||||
return ratio
|
||||
}
|
||||
|
||||
@@ -42,5 +42,6 @@ const (
|
||||
DeepL
|
||||
TogetherAI
|
||||
Doubao
|
||||
Novita
|
||||
Dummy
|
||||
)
|
||||
|
||||
@@ -42,6 +42,7 @@ var ChannelBaseURLs = []string{
|
||||
"https://api-free.deepl.com", // 38
|
||||
"https://api.together.xyz", // 39
|
||||
"https://ark.cn-beijing.volces.com", // 40
|
||||
"https://api.novita.ai/v3/openai", // 41
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -7,6 +7,10 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/client"
|
||||
@@ -21,9 +25,6 @@ import (
|
||||
"github.com/songquanpeng/one-api/relay/meta"
|
||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||
"github.com/songquanpeng/one-api/relay/relaymode"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func RelayAudioHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
|
||||
@@ -53,7 +54,7 @@ func RelayAudioHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatus
|
||||
}
|
||||
}
|
||||
|
||||
modelRatio := billingratio.GetModelRatio(audioModel)
|
||||
modelRatio := billingratio.GetModelRatio(audioModel, channelType)
|
||||
groupRatio := billingratio.GetGroupRatio(group)
|
||||
ratio := modelRatio * groupRatio
|
||||
var quota int64
|
||||
|
||||
@@ -4,6 +4,10 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
@@ -16,9 +20,6 @@ import (
|
||||
"github.com/songquanpeng/one-api/relay/meta"
|
||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||
"github.com/songquanpeng/one-api/relay/relaymode"
|
||||
"math"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.GeneralOpenAIRequest, error) {
|
||||
@@ -40,78 +41,6 @@ func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.Gener
 	return textRequest, nil
 }
 
-func getImageRequest(c *gin.Context, relayMode int) (*relaymodel.ImageRequest, error) {
-	imageRequest := &relaymodel.ImageRequest{}
-	err := common.UnmarshalBodyReusable(c, imageRequest)
-	if err != nil {
-		return nil, err
-	}
-	if imageRequest.N == 0 {
-		imageRequest.N = 1
-	}
-	if imageRequest.Size == "" {
-		imageRequest.Size = "1024x1024"
-	}
-	if imageRequest.Model == "" {
-		imageRequest.Model = "dall-e-2"
-	}
-	return imageRequest, nil
-}
-
-func isValidImageSize(model string, size string) bool {
-	if model == "cogview-3" {
-		return true
-	}
-	_, ok := billingratio.ImageSizeRatios[model][size]
-	return ok
-}
-
-func getImageSizeRatio(model string, size string) float64 {
-	ratio, ok := billingratio.ImageSizeRatios[model][size]
-	if !ok {
-		return 1
-	}
-	return ratio
-}
-
-func validateImageRequest(imageRequest *relaymodel.ImageRequest, meta *meta.Meta) *relaymodel.ErrorWithStatusCode {
-	// model validation
-	hasValidSize := isValidImageSize(imageRequest.Model, imageRequest.Size)
-	if !hasValidSize {
-		return openai.ErrorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
-	}
-	// check prompt length
-	if imageRequest.Prompt == "" {
-		return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
-	}
-	if len(imageRequest.Prompt) > billingratio.ImagePromptLengthLimitations[imageRequest.Model] {
-		return openai.ErrorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
-	}
-	// Number of generated images validation
-	if !isWithinRange(imageRequest.Model, imageRequest.N) {
-		// channel not azure
-		if meta.ChannelType != channeltype.Azure {
-			return openai.ErrorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
-		}
-	}
-	return nil
-}
-
-func getImageCostRatio(imageRequest *relaymodel.ImageRequest) (float64, error) {
-	if imageRequest == nil {
-		return 0, errors.New("imageRequest is nil")
-	}
-	imageCostRatio := getImageSizeRatio(imageRequest.Model, imageRequest.Size)
-	if imageRequest.Quality == "hd" && imageRequest.Model == "dall-e-3" {
-		if imageRequest.Size == "1024x1024" {
-			imageCostRatio *= 2
-		} else {
-			imageCostRatio *= 1.5
-		}
-	}
-	return imageCostRatio, nil
-}
-
 func getPromptTokens(textRequest *relaymodel.GeneralOpenAIRequest, relayMode int) int {
 	switch relayMode {
 	case relaymode.ChatCompletions:
@@ -167,7 +96,7 @@ func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.M
 		return
 	}
 	var quota int64
-	completionRatio := billingratio.GetCompletionRatio(textRequest.Model)
+	completionRatio := billingratio.GetCompletionRatio(textRequest.Model, meta.ChannelType)
 	promptTokens := usage.PromptTokens
 	completionTokens := usage.CompletionTokens
 	quota = int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
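For reference, the quota formula in the hunk above with concrete numbers plugged in; the values are made up, not one-api's real ratios:

package main

import (
	"fmt"
	"math"
)

func main() {
	// quota = ceil((promptTokens + completionTokens*completionRatio) * ratio)
	promptTokens, completionTokens := 1000, 500
	completionRatio := 3.0 // e.g. completion tokens billed at 3x prompt tokens
	ratio := 0.5           // modelRatio * groupRatio
	quota := int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
	fmt.Println(quota) // (1000 + 500*3) * 0.5 = 1250
}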
@@ -11,6 +11,7 @@ import (
 	"strings"
 
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/model"
@@ -22,13 +23,84 @@ import (
 	relaymodel "github.com/songquanpeng/one-api/relay/model"
 )
 
-func isWithinRange(element string, value int) bool {
-	if _, ok := billingratio.ImageGenerationAmounts[element]; !ok {
-		return false
-	}
-	min := billingratio.ImageGenerationAmounts[element][0]
-	max := billingratio.ImageGenerationAmounts[element][1]
-	return value >= min && value <= max
-}
+func getImageRequest(c *gin.Context, relayMode int) (*relaymodel.ImageRequest, error) {
+	imageRequest := &relaymodel.ImageRequest{}
+	err := common.UnmarshalBodyReusable(c, imageRequest)
+	if err != nil {
+		return nil, err
+	}
+	if imageRequest.N == 0 {
+		imageRequest.N = 1
+	}
+	if imageRequest.Size == "" {
+		imageRequest.Size = "1024x1024"
+	}
+	if imageRequest.Model == "" {
+		imageRequest.Model = "dall-e-2"
+	}
+	return imageRequest, nil
+}
+
+func isValidImageSize(model string, size string) bool {
+	if model == "cogview-3" || billingratio.ImageSizeRatios[model] == nil {
+		return true
+	}
+	_, ok := billingratio.ImageSizeRatios[model][size]
+	return ok
+}
+
+func isValidImagePromptLength(model string, promptLength int) bool {
+	maxPromptLength, ok := billingratio.ImagePromptLengthLimitations[model]
+	return !ok || promptLength <= maxPromptLength
+}
+
+func isWithinRange(element string, value int) bool {
+	amounts, ok := billingratio.ImageGenerationAmounts[element]
+	return !ok || (value >= amounts[0] && value <= amounts[1])
+}
+
+func getImageSizeRatio(model string, size string) float64 {
+	if ratio, ok := billingratio.ImageSizeRatios[model][size]; ok {
+		return ratio
+	}
+	return 1
+}
+
+func validateImageRequest(imageRequest *relaymodel.ImageRequest, meta *meta.Meta) *relaymodel.ErrorWithStatusCode {
+	// check prompt length
+	if imageRequest.Prompt == "" {
+		return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
+	}
+
+	// model validation
+	if !isValidImageSize(imageRequest.Model, imageRequest.Size) {
+		return openai.ErrorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
+	}
+
+	if !isValidImagePromptLength(imageRequest.Model, len(imageRequest.Prompt)) {
+		return openai.ErrorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
+	}
+
+	// Number of generated images validation
+	if !isWithinRange(imageRequest.Model, imageRequest.N) {
+		return openai.ErrorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+	}
+	return nil
+}
+
+func getImageCostRatio(imageRequest *relaymodel.ImageRequest) (float64, error) {
+	if imageRequest == nil {
+		return 0, errors.New("imageRequest is nil")
+	}
+	imageCostRatio := getImageSizeRatio(imageRequest.Model, imageRequest.Size)
+	if imageRequest.Quality == "hd" && imageRequest.Model == "dall-e-3" {
+		if imageRequest.Size == "1024x1024" {
+			imageCostRatio *= 2
+		} else {
+			imageCostRatio *= 1.5
+		}
+	}
+	return imageCostRatio, nil
+}
 
 func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
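A standalone sketch of the cost-ratio logic just moved into the image controller: the size table below is invented so the example runs on its own (one-api's real table lives in the billingratio package), but the HD surcharge mirrors the code above:

package main

import (
	"errors"
	"fmt"
)

// Invented size-ratio table for illustration only.
var imageSizeRatios = map[string]map[string]float64{
	"dall-e-3": {"1024x1024": 1, "1024x1792": 2},
}

type imageRequest struct{ Model, Size, Quality string }

func getImageCostRatio(r *imageRequest) (float64, error) {
	if r == nil {
		return 0, errors.New("imageRequest is nil")
	}
	ratio := 1.0 // unknown sizes default to 1, as in getImageSizeRatio
	if v, ok := imageSizeRatios[r.Model][r.Size]; ok {
		ratio = v
	}
	// Same HD surcharge as the diff: 2x for 1024x1024, 1.5x otherwise.
	if r.Quality == "hd" && r.Model == "dall-e-3" {
		if r.Size == "1024x1024" {
			ratio *= 2
		} else {
			ratio *= 1.5
		}
	}
	return ratio, nil
}

func main() {
	r, _ := getImageCostRatio(&imageRequest{Model: "dall-e-3", Size: "1024x1792", Quality: "hd"})
	fmt.Println(r) // 3 (size ratio 2 * hd surcharge 1.5)
}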
@@ -97,7 +169,7 @@ func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatus
 		requestBody = bytes.NewBuffer(jsonStr)
 	}
 
-	modelRatio := billingratio.GetModelRatio(imageModel)
+	modelRatio := billingratio.GetModelRatio(imageModel, meta.ChannelType)
 	groupRatio := billingratio.GetGroupRatio(meta.Group)
 	ratio := modelRatio * groupRatio
 	userQuota, err := model.CacheGetUserQuota(ctx, meta.UserId)
@@ -4,6 +4,9 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
+	"net/http"
+
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/common/logger"
 	"github.com/songquanpeng/one-api/relay"
@@ -14,8 +17,6 @@ import (
 	"github.com/songquanpeng/one-api/relay/channeltype"
 	"github.com/songquanpeng/one-api/relay/meta"
 	"github.com/songquanpeng/one-api/relay/model"
-	"io"
-	"net/http"
 )
 
 func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
@@ -35,7 +36,7 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 	textRequest.Model, isModelMapped = getMappedModelName(textRequest.Model, meta.ModelMapping)
 	meta.ActualModelName = textRequest.Model
 	// get model ratio & group ratio
-	modelRatio := billingratio.GetModelRatio(textRequest.Model)
+	modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType)
 	groupRatio := billingratio.GetGroupRatio(meta.Group)
 	ratio := modelRatio * groupRatio
 	// pre-consume quota
@@ -1,10 +1,11 @@
 package model
 
 type Message struct {
-	Role      string  `json:"role,omitempty"`
-	Content   any     `json:"content,omitempty"`
-	Name      *string `json:"name,omitempty"`
-	ToolCalls []Tool  `json:"tool_calls,omitempty"`
+	Role       string  `json:"role,omitempty"`
+	Content    any     `json:"content,omitempty"`
+	Name       *string `json:"name,omitempty"`
+	ToolCalls  []Tool  `json:"tool_calls,omitempty"`
+	ToolCallId string  `json:"tool_call_id,omitempty"`
 }
 
 func (m Message) IsStringContent() bool {
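The new ToolCallId field lets a role:"tool" message reference the assistant tool call it answers, following the OpenAI chat format. A small usage sketch; the id and content values are made up:

package main

import (
	"encoding/json"
	"fmt"

	model "github.com/songquanpeng/one-api/relay/model"
)

func main() {
	// A tool-result message: ToolCallId carries the id from the
	// assistant's earlier tool_calls entry.
	msg := model.Message{
		Role:       "tool",
		Content:    `{"temperature": 22}`,
		ToolCallId: "call_abc123",
	}
	b, _ := json.Marshal(msg)
	// Name and ToolCalls are omitted thanks to omitempty:
	// {"role":"tool","content":"{\"temperature\": 22}","tool_call_id":"call_abc123"}
	fmt.Println(string(b))
}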
@@ -2,13 +2,13 @@ package model
 
 type Tool struct {
 	Id       string   `json:"id,omitempty"`
-	Type     string   `json:"type"`
+	Type     string   `json:"type,omitempty"` // when splicing claude tools stream messages, it is empty
 	Function Function `json:"function"`
 }
 
 type Function struct {
 	Description string `json:"description,omitempty"`
-	Name        string `json:"name"`
+	Name        string `json:"name,omitempty"` // when splicing claude tools stream messages, it is empty
 	Parameters  any    `json:"parameters,omitempty"` // request
 	Arguments   any    `json:"arguments,omitempty"`  // response
 }
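With Type and Name now omitempty, partially-filled Tool values built while accumulating streamed tool-call deltas (the Claude splicing case the comments mention) no longer emit empty "type"/"name" keys on every chunk. A sketch; the id and argument fragment are made up:

package main

import (
	"encoding/json"
	"fmt"

	model "github.com/songquanpeng/one-api/relay/model"
)

func main() {
	// A later stream delta that only appends argument text: Type and Name
	// are zero-valued, so they disappear from the wire format entirely.
	delta := model.Tool{
		Id:       "toolu_01",
		Function: model.Function{Arguments: `{"city":"Par`},
	}
	b, _ := json.Marshal(delta)
	// {"id":"toolu_01","function":{"arguments":"{\"city\":\"Par"}}
	fmt.Println(string(b))
}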