support gpt-4-1106-vision-preview

This commit is contained in:
parent fc494416fc
commit 7e0d2606c3
@@ -86,9 +86,10 @@ func buildTestRequest() *ChatRequest {
 		Model:     "", // this will be set later
 		MaxTokens: 1,
 	}
+	content, _ := json.Marshal("hi")
 	testMessage := Message{
 		Role:    "user",
-		Content: "hi",
+		Content: content,
 	}
 	testRequest.Messages = append(testRequest.Messages, testMessage)
 	return testRequest
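
Note on the hunk above: because Content is now a json.RawMessage (see the Message struct change at the end of this diff), the health-check message must carry pre-encoded JSON rather than a bare Go string. A minimal standalone sketch, not part of the commit, of what that pre-encoding produces:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// json.Marshal of a Go string yields the JSON string literal,
	// quotes included, which is exactly what a raw "content" field holds.
	content, _ := json.Marshal("hi")
	fmt.Println(string(content)) // prints: "hi"
}
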
@@ -186,6 +187,10 @@ func testAllChannels(notify bool) error {
 				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
 				ban = true
 			}
+			if openaiErr != nil {
+				err = errors.New(fmt.Sprintf("type %s, code %v, message %s", openaiErr.Type, openaiErr.Code, openaiErr.Message))
+				ban = true
+			}
 			// parse *int to bool
 			if channel.AutoBan != nil && *channel.AutoBan == 0 {
 				ban = false
@@ -48,7 +48,7 @@ type AIProxyLibraryStreamResponse struct {
 func requestOpenAI2AIProxyLibrary(request GeneralOpenAIRequest) *AIProxyLibraryRequest {
 	query := ""
 	if len(request.Messages) != 0 {
-		query = request.Messages[len(request.Messages)-1].Content
+		query = string(request.Messages[len(request.Messages)-1].Content)
 	}
 	return &AIProxyLibraryRequest{
 		Model: request.Model,
@@ -69,7 +69,7 @@ func aiProxyDocuments2Markdown(documents []AIProxyLibraryDocument) string {
 }
 
 func responseAIProxyLibrary2OpenAI(response *AIProxyLibraryResponse) *OpenAITextResponse {
-	content := response.Answer + aiProxyDocuments2Markdown(response.Documents)
+	content, _ := json.Marshal(response.Answer + aiProxyDocuments2Markdown(response.Documents))
 	choice := OpenAITextResponseChoice{
 		Index: 0,
 		Message: Message{
@@ -88,18 +88,18 @@ func requestOpenAI2Ali(request GeneralOpenAIRequest) *AliChatRequest {
 		message := request.Messages[i]
 		if message.Role == "system" {
 			messages = append(messages, AliMessage{
-				User: message.Content,
+				User: string(message.Content),
 				Bot:  "Okay",
 			})
 			continue
 		} else {
 			if i == len(request.Messages)-1 {
-				prompt = message.Content
+				prompt = string(message.Content)
 				break
 			}
 			messages = append(messages, AliMessage{
-				User: message.Content,
-				Bot:  request.Messages[i+1].Content,
+				User: string(message.Content),
+				Bot:  string(request.Messages[i+1].Content),
 			})
 			i++
 		}
@@ -184,11 +184,12 @@ func embeddingResponseAli2OpenAI(response *AliEmbeddingResponse) *OpenAIEmbeddin
 }
 
 func responseAli2OpenAI(response *AliChatResponse) *OpenAITextResponse {
+	content, _ := json.Marshal(response.Output.Text)
 	choice := OpenAITextResponseChoice{
 		Index: 0,
 		Message: Message{
 			Role:    "assistant",
-			Content: response.Output.Text,
+			Content: content,
 		},
 		FinishReason: response.Output.FinishReason,
 	}
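
A subtlety in the adaptor conversions above and below: casting a json.RawMessage to string copies the raw JSON bytes verbatim, so plain-text content keeps its surrounding quotes. A standalone sketch, not part of the commit, contrasting the direct cast with a proper unmarshal:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := json.RawMessage(`"hello"`)

	// Direct cast: the JSON encoding survives, quotes and all.
	fmt.Println(string(raw)) // "hello"

	// Unmarshal: decodes the JSON string and strips the quotes.
	var s string
	if err := json.Unmarshal(raw, &s); err == nil {
		fmt.Println(s) // hello
	}
}
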
@@ -89,7 +89,7 @@ func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest {
 		if message.Role == "system" {
 			messages = append(messages, BaiduMessage{
 				Role:    "user",
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 			messages = append(messages, BaiduMessage{
 				Role:    "assistant",
@@ -98,7 +98,7 @@ func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest {
 		} else {
 			messages = append(messages, BaiduMessage{
 				Role:    message.Role,
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 		}
 	}
@@ -109,11 +109,12 @@ func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest {
 }
 
 func responseBaidu2OpenAI(response *BaiduChatResponse) *OpenAITextResponse {
+	content, _ := json.Marshal(response.Result)
 	choice := OpenAITextResponseChoice{
 		Index: 0,
 		Message: Message{
 			Role:    "assistant",
-			Content: response.Result,
+			Content: content,
 		},
 		FinishReason: "stop",
 	}
@@ -93,11 +93,12 @@ func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *ChatCompletion
 }
 
 func responseClaude2OpenAI(claudeResponse *ClaudeResponse) *OpenAITextResponse {
+	content, _ := json.Marshal(strings.TrimPrefix(claudeResponse.Completion, " "))
 	choice := OpenAITextResponseChoice{
 		Index: 0,
 		Message: Message{
 			Role:    "assistant",
-			Content: strings.TrimPrefix(claudeResponse.Completion, " "),
+			Content: content,
 			Name:    nil,
 		},
 		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
@@ -132,7 +132,7 @@ func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool, promp
 	if textResponse.Usage.TotalTokens == 0 {
 		completionTokens := 0
 		for _, choice := range textResponse.Choices {
-			completionTokens += countTokenText(choice.Message.Content, model)
+			completionTokens += countTokenText(string(choice.Message.Content), model)
 		}
 		textResponse.Usage = Usage{
 			PromptTokens: promptTokens,
@@ -59,7 +59,7 @@ func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest {
 	}
 	for _, message := range textRequest.Messages {
 		palmMessage := PaLMChatMessage{
-			Content: message.Content,
+			Content: string(message.Content),
 		}
 		if message.Role == "user" {
 			palmMessage.Author = "0"
@@ -76,11 +76,12 @@ func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse {
 		Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
 	}
 	for i, candidate := range response.Candidates {
+		content, _ := json.Marshal(candidate.Content)
 		choice := OpenAITextResponseChoice{
 			Index: i,
 			Message: Message{
 				Role:    "assistant",
-				Content: candidate.Content,
+				Content: content,
 			},
 			FinishReason: "stop",
 		}
@@ -84,7 +84,7 @@ func requestOpenAI2Tencent(request GeneralOpenAIRequest) *TencentChatRequest {
 		if message.Role == "system" {
 			messages = append(messages, TencentMessage{
 				Role:    "user",
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 			messages = append(messages, TencentMessage{
 				Role:    "assistant",
@@ -93,7 +93,7 @@ func requestOpenAI2Tencent(request GeneralOpenAIRequest) *TencentChatRequest {
 			continue
 		}
 		messages = append(messages, TencentMessage{
-			Content: message.Content,
+			Content: string(message.Content),
 			Role:    message.Role,
 		})
 	}
@@ -119,11 +119,12 @@ func responseTencent2OpenAI(response *TencentChatResponse) *OpenAITextResponse {
 		Usage: response.Usage,
 	}
 	if len(response.Choices) > 0 {
+		content, _ := json.Marshal(response.Choices[0].Messages.Content)
 		choice := OpenAITextResponseChoice{
 			Index: 0,
 			Message: Message{
 				Role:    "assistant",
-				Content: response.Choices[0].Messages.Content,
+				Content: content,
 			},
 			FinishReason: response.Choices[0].FinishReason,
 		}
@@ -199,9 +199,13 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}
 	var promptTokens int
 	var completionTokens int
+	var err error
 	switch relayMode {
 	case RelayModeChatCompletions:
-		promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
+		promptTokens, err = countTokenMessages(textRequest.Messages, textRequest.Model)
+		if err != nil {
+			return errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
+		}
 	case RelayModeCompletions:
 		promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
 	case RelayModeModerations:
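
Token counting can now fail instead of miscounting silently, and the failure surfaces to the caller as an HTTP 500. A rough standalone sketch of that flow; the OpenAIErrorWithStatusCode shape below is assumed from the call site above, not copied from the repository:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Assumed shape, inferred from the call site above.
type OpenAIErrorWithStatusCode struct {
	StatusCode int
	Code       string
	Err        error
}

func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
	return &OpenAIErrorWithStatusCode{StatusCode: statusCode, Code: code, Err: err}
}

func main() {
	err := errors.New("invalid character 'x' looking for beginning of value")
	e := errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
	fmt.Printf("HTTP %d (%s): %v\n", e.StatusCode, e.Code, e.Err)
}
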
@@ -63,7 +63,8 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
 	return len(tokenEncoder.Encode(text, nil, nil))
 }
 
-func countTokenMessages(messages []Message, model string) int {
+func countTokenMessages(messages []Message, model string) (int, error) {
+	//recover when panic
 	tokenEncoder := getTokenEncoder(model)
 	// Reference:
 	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
@@ -82,15 +83,33 @@ func countTokenMessages(messages []Message, model string) int {
 	tokenNum := 0
 	for _, message := range messages {
 		tokenNum += tokensPerMessage
-		tokenNum += getTokenNum(tokenEncoder, message.Content)
 		tokenNum += getTokenNum(tokenEncoder, message.Role)
-		if message.Name != nil {
-			tokenNum += tokensPerName
-			tokenNum += getTokenNum(tokenEncoder, *message.Name)
+		var arrayContent []MediaMessage
+		if err := json.Unmarshal(message.Content, &arrayContent); err != nil {
+			var stringContent string
+			if err := json.Unmarshal(message.Content, &stringContent); err != nil {
+				return 0, err
+			} else {
+				tokenNum += getTokenNum(tokenEncoder, stringContent)
+				if message.Name != nil {
+					tokenNum += tokensPerName
+					tokenNum += getTokenNum(tokenEncoder, *message.Name)
+				}
+			}
+		} else {
+			for _, m := range arrayContent {
+				if m.Type == "image_url" {
+					//TODO: getImageToken
+					tokenNum += 1000
+				} else {
+					tokenNum += getTokenNum(tokenEncoder, m.Text)
+				}
+			}
 		}
 	}
 	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
-	return tokenNum
+	return tokenNum, nil
 }
 
 func countTokenInput(input any, model string) int {
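
The counting change above hinges on a decode-and-fall-back pattern: try the content as an array of media parts first, then as a plain string, and propagate an error if neither fits. A self-contained sketch of that pattern (MediaMessage is trimmed to the fields used here; the commit itself charges a flat 1000 tokens per image as a placeholder until getImageToken exists):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirror of the MediaMessage type added at the end of this diff.
type MediaMessage struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

// describe reports which shape a raw "content" field carries.
func describe(content json.RawMessage) string {
	var parts []MediaMessage
	if err := json.Unmarshal(content, &parts); err == nil {
		return fmt.Sprintf("array content, %d parts", len(parts))
	}
	var text string
	if err := json.Unmarshal(content, &text); err == nil {
		return "string content: " + text
	}
	return "neither shape: counting would return an error"
}

func main() {
	fmt.Println(describe(json.RawMessage(`"hi"`)))
	fmt.Println(describe(json.RawMessage(`[{"type":"text","text":"hi"},{"type":"image_url"}]`)))
	fmt.Println(describe(json.RawMessage(`42`)))
}
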
@@ -81,7 +81,7 @@ func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string, doma
 		if message.Role == "system" {
 			messages = append(messages, XunfeiMessage{
 				Role:    "user",
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 			messages = append(messages, XunfeiMessage{
 				Role:    "assistant",
@@ -90,7 +90,7 @@ func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string, doma
 		} else {
 			messages = append(messages, XunfeiMessage{
 				Role:    message.Role,
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 		}
 	}
@@ -112,11 +112,12 @@ func responseXunfei2OpenAI(response *XunfeiChatResponse) *OpenAITextResponse {
 			},
 		}
 	}
+	content, _ := json.Marshal(response.Payload.Choices.Text[0].Content)
 	choice := OpenAITextResponseChoice{
 		Index: 0,
 		Message: Message{
 			Role:    "assistant",
-			Content: response.Payload.Choices.Text[0].Content,
+			Content: content,
 		},
 		FinishReason: stopFinishReason,
 	}
@@ -114,7 +114,7 @@ func requestOpenAI2Zhipu(request GeneralOpenAIRequest) *ZhipuRequest {
 		if message.Role == "system" {
 			messages = append(messages, ZhipuMessage{
 				Role:    "system",
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 			messages = append(messages, ZhipuMessage{
 				Role:    "user",
@@ -123,7 +123,7 @@ func requestOpenAI2Zhipu(request GeneralOpenAIRequest) *ZhipuRequest {
 		} else {
 			messages = append(messages, ZhipuMessage{
 				Role:    message.Role,
-				Content: message.Content,
+				Content: string(message.Content),
 			})
 		}
 	}
@@ -144,11 +144,12 @@ func responseZhipu2OpenAI(response *ZhipuResponse) *OpenAITextResponse {
 		Usage: response.Data.Usage,
 	}
 	for i, choice := range response.Data.Choices {
+		content, _ := json.Marshal(strings.Trim(choice.Content, "\""))
 		openaiChoice := OpenAITextResponseChoice{
 			Index: i,
 			Message: Message{
 				Role:    choice.Role,
-				Content: strings.Trim(choice.Content, "\""),
+				Content: content,
 			},
 			FinishReason: "",
 		}
@@ -1,6 +1,7 @@
 package controller
 
 import (
+	"encoding/json"
 	"fmt"
 	"log"
 	"net/http"
@@ -12,9 +13,20 @@ import (
 )
 
 type Message struct {
 	Role    string          `json:"role"`
-	Content string          `json:"content"`
+	Content json.RawMessage `json:"content"`
 	Name    *string         `json:"name,omitempty"`
+}
+
+type MediaMessage struct {
+	Type     string          `json:"type"`
+	Text     string          `json:"text"`
+	ImageUrl MessageImageUrl `json:"image_url,omitempty"`
+}
+
+type MessageImageUrl struct {
+	Url    string `json:"url"`
+	Detail string `json:"detail"`
 }
 
 const (
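
With Content widened to json.RawMessage, a single Message type now carries both the classic string content and the vision-style array of typed parts that gpt-4-1106-vision-preview expects. A standalone sketch building one of each; the types are copied from the diff above, while the prompt text and image URL are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
)

type MessageImageUrl struct {
	Url    string `json:"url"`
	Detail string `json:"detail"`
}

type MediaMessage struct {
	Type     string          `json:"type"`
	Text     string          `json:"text"`
	ImageUrl MessageImageUrl `json:"image_url,omitempty"`
}

type Message struct {
	Role    string          `json:"role"`
	Content json.RawMessage `json:"content"`
	Name    *string         `json:"name,omitempty"`
}

func main() {
	// Classic shape: content is a JSON string.
	text := Message{Role: "user"}
	text.Content, _ = json.Marshal("hello")

	// Vision shape: content is a JSON array of typed parts.
	parts := []MediaMessage{
		{Type: "text", Text: "what is in this image?"},
		{Type: "image_url", ImageUrl: MessageImageUrl{Url: "https://example.com/cat.png", Detail: "low"}},
	}
	vision := Message{Role: "user"}
	vision.Content, _ = json.Marshal(parts)

	out, _ := json.MarshalIndent([]Message{text, vision}, "", "  ")
	fmt.Println(string(out))
}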