Merge branch 'songquanpeng' into sync_upstream

Martial BE
2023-12-21 15:36:01 +08:00
41 changed files with 1045 additions and 89 deletions


@@ -33,6 +33,9 @@ func (p *AliProvider) GetRequestHeaders() (headers map[string]string) {
 	headers = make(map[string]string)
 	p.CommonRequestHeaders(headers)
 	headers["Authorization"] = fmt.Sprintf("Bearer %s", p.Context.GetString("api_key"))
+	if p.Context.GetString("plugin") != "" {
+		headers["X-DashScope-Plugin"] = p.Context.GetString("plugin")
+	}
 	return headers
 }
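With this change, a channel that configures a DashScope plugin id (the "plugin" value on the request context) forwards it on every Ali request via the X-DashScope-Plugin header; requests without one are unchanged.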


@@ -53,32 +53,17 @@ func (aliResponse *AliChatResponse) ResponseHandler(resp *http.Response) (OpenAI
 // Get the chat request body
 func (p *AliProvider) getChatRequestBody(request *types.ChatCompletionRequest) *AliChatRequest {
 	messages := make([]AliMessage, 0, len(request.Messages))
-	prompt := ""
 	for i := 0; i < len(request.Messages); i++ {
 		message := request.Messages[i]
-		if message.Role == "system" {
-			messages = append(messages, AliMessage{
-				User: message.StringContent(),
-				Bot:  "Okay",
-			})
-			continue
-		} else {
-			if i == len(request.Messages)-1 {
-				prompt = message.StringContent()
-				break
-			}
-			messages = append(messages, AliMessage{
-				User: message.StringContent(),
-				Bot:  request.Messages[i+1].StringContent(),
-			})
-			i++
-		}
+		messages = append(messages, AliMessage{
+			Content: message.StringContent(),
+			Role:    strings.ToLower(message.Role),
+		})
 	}
 	return &AliChatRequest{
 		Model: request.Model,
 		Input: AliInput{
-			Prompt:  prompt,
-			History: messages,
+			Messages: messages,
 		},
 	}
 }


@@ -13,13 +13,13 @@ type AliUsage struct {
 }
 type AliMessage struct {
-	User string `json:"user"`
-	Bot  string `json:"bot"`
+	Content string `json:"content"`
+	Role    string `json:"role"`
 }
 type AliInput struct {
-	Prompt  string       `json:"prompt"`
-	History []AliMessage `json:"history"`
+	// Prompt string `json:"prompt"`
+	Messages []AliMessage `json:"messages"`
 }
 type AliParameters struct {
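Taken together, these two hunks move the Ali provider from the prompt/history pairing to DashScope's flat messages format: every OpenAI-style message maps 1:1 into the messages array. A minimal, self-contained sketch of the request body the new getChatRequestBody produces ("qwen-turbo" is an assumed model name, and the "model" JSON tag on AliChatRequest is assumed from the surrounding code):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the types from the diff above, for a runnable demo.
type AliMessage struct {
	Content string `json:"content"`
	Role    string `json:"role"`
}

type AliInput struct {
	Messages []AliMessage `json:"messages"`
}

type AliChatRequest struct {
	Model string   `json:"model"`
	Input AliInput `json:"input"`
}

func main() {
	req := AliChatRequest{
		Model: "qwen-turbo", // assumed model name
		Input: AliInput{Messages: []AliMessage{
			{Role: "system", Content: "You are a helpful assistant."},
			{Role: "user", Content: "Hello!"},
		}},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
	// Prints the DashScope-style body:
	// {"model": "qwen-turbo", "input": {"messages": [...]}}
}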
type AliParameters struct {


@@ -69,7 +69,9 @@ func (p *ClaudeProvider) getChatRequestBody(request *types.ChatCompletionRequest
 		} else if message.Role == "assistant" {
 			prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
 		} else if message.Role == "system" {
-			prompt += fmt.Sprintf("\n\nSystem: %s", message.Content)
+			if prompt == "" {
+				prompt = message.StringContent()
+			}
 		}
 	}
 	prompt += "\n\nAssistant:"
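After this change, a leading system message becomes the bare prompt prefix (Claude's text API has no "System:" turn), while any later system message is silently dropped. A minimal, self-contained sketch of the full loop's behavior (the user/"Human:" branch sits above the hunk and is assumed unchanged; message contents are invented):

package main

import "fmt"

type msg struct{ Role, Content string }

// buildPrompt mirrors the Claude text-completion prompt assembly.
func buildPrompt(messages []msg) string {
	prompt := ""
	for _, m := range messages {
		switch m.Role {
		case "user":
			prompt += fmt.Sprintf("\n\nHuman: %s", m.Content)
		case "assistant":
			prompt += fmt.Sprintf("\n\nAssistant: %s", m.Content)
		case "system":
			if prompt == "" {
				prompt = m.Content // only a leading system message survives
			}
		}
	}
	return prompt + "\n\nAssistant:"
}

func main() {
	fmt.Println(buildPrompt([]msg{
		{Role: "system", Content: "You are terse."},
		{Role: "user", Content: "Hi"},
	}))
	// Output:
	// You are terse.
	//
	// Human: Hi
	//
	// Assistant:
}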

providers/gemini/base.go (new file)

@@ -0,0 +1,45 @@
package gemini

import (
	"fmt"
	"one-api/providers/base"
	"strings"

	"github.com/gin-gonic/gin"
)

type GeminiProviderFactory struct{}

// Create GeminiProvider
func (f GeminiProviderFactory) Create(c *gin.Context) base.ProviderInterface {
	return &GeminiProvider{
		BaseProvider: base.BaseProvider{
			BaseURL:         "https://generativelanguage.googleapis.com",
			ChatCompletions: "/",
			Context:         c,
		},
	}
}

type GeminiProvider struct {
	base.BaseProvider
}

func (p *GeminiProvider) GetFullRequestURL(requestURL string, modelName string) string {
	baseURL := strings.TrimSuffix(p.GetBaseURL(), "/")
	version := "v1"
	if p.Context.GetString("api_version") != "" {
		version = p.Context.GetString("api_version")
	}

	return fmt.Sprintf("%s/%s/models/%s:%s?key=%s", baseURL, version, modelName, requestURL, p.Context.GetString("api_key"))
}

// Get request headers
func (p *GeminiProvider) GetRequestHeaders() (headers map[string]string) {
	headers = make(map[string]string)
	p.CommonRequestHeaders(headers)

	return headers
}
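// For reference, with the defaults above and an assumed model name of
// gemini-pro, GetFullRequestURL("generateContent", "gemini-pro") resolves to
//
//	https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent?key=<api_key>
//
// where <api_key> stands in for the channel's key, and the version segment
// switches to api_version when one is configured.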

providers/gemini/chat.go (new file)

@@ -0,0 +1,261 @@
package gemini

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"one-api/common"
	"one-api/providers/base"
	"one-api/types"
	"strings"
)

func (response *GeminiChatResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
	if len(response.Candidates) == 0 {
		return nil, &types.OpenAIErrorWithStatusCode{
			OpenAIError: types.OpenAIError{
				Message: "No candidates returned",
				Type:    "server_error",
				Param:   "",
				Code:    500,
			},
			StatusCode: resp.StatusCode,
		}
	}

	fullTextResponse := &types.ChatCompletionResponse{
		ID:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
		Object:  "chat.completion",
		Created: common.GetTimestamp(),
		Choices: make([]types.ChatCompletionChoice, 0, len(response.Candidates)),
	}
	for i, candidate := range response.Candidates {
		choice := types.ChatCompletionChoice{
			Index: i,
			Message: types.ChatCompletionMessage{
				Role:    "assistant",
				Content: "",
			},
			FinishReason: base.StopFinishReason,
		}
		if len(candidate.Content.Parts) > 0 {
			choice.Message.Content = candidate.Content.Parts[0].Text
		}
		fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
	}

	completionTokens := common.CountTokenText(response.GetResponseText(), "gemini-pro")
	response.Usage.CompletionTokens = completionTokens
	response.Usage.TotalTokens = response.Usage.PromptTokens + completionTokens

	return fullTextResponse, nil
}
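// Note: only the first Part of each candidate is surfaced as message content,
// and completion tokens are recomputed locally with common.CountTokenText
// rather than read from the response, since the Gemini payload itself carries
// no usage data.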
// Safety settings (currently commented out below) would relax Gemini's
// filters to the lowest thresholds, since Gemini is already restrictive enough
func (p *GeminiProvider) getChatRequestBody(request *types.ChatCompletionRequest) (requestBody *GeminiChatRequest) {
	geminiRequest := GeminiChatRequest{
		Contents: make([]GeminiChatContent, 0, len(request.Messages)),
		//SafetySettings: []GeminiChatSafetySettings{
		//	{
		//		Category:  "HARM_CATEGORY_HARASSMENT",
		//		Threshold: "BLOCK_ONLY_HIGH",
		//	},
		//	{
		//		Category:  "HARM_CATEGORY_HATE_SPEECH",
		//		Threshold: "BLOCK_ONLY_HIGH",
		//	},
		//	{
		//		Category:  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
		//		Threshold: "BLOCK_ONLY_HIGH",
		//	},
		//	{
		//		Category:  "HARM_CATEGORY_DANGEROUS_CONTENT",
		//		Threshold: "BLOCK_ONLY_HIGH",
		//	},
		//},
		GenerationConfig: GeminiChatGenerationConfig{
			Temperature:     request.Temperature,
			TopP:            request.TopP,
			MaxOutputTokens: request.MaxTokens,
		},
	}
	if request.Functions != nil {
		geminiRequest.Tools = []GeminiChatTools{
			{
				FunctionDeclarations: request.Functions,
			},
		}
	}

	shouldAddDummyModelMessage := false
	for _, message := range request.Messages {
		content := GeminiChatContent{
			Role: message.Role,
			Parts: []GeminiPart{
				{
					Text: message.StringContent(),
				},
			},
		}

		// Gemini has no assistant role, and the API rejects any role other than user or model
		if content.Role == "assistant" {
			content.Role = "model"
		}
		// Convert the system prompt to a user prompt for the same reason
		if content.Role == "system" {
			content.Role = "user"
			shouldAddDummyModelMessage = true
		}
		geminiRequest.Contents = append(geminiRequest.Contents, content)

		// If a system message is the last message, add a dummy model message to keep Gemini happy
		if shouldAddDummyModelMessage {
			geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
				Role: "model",
				Parts: []GeminiPart{
					{
						Text: "Okay",
					},
				},
			})
			shouldAddDummyModelMessage = false
		}
	}

	return &geminiRequest
}
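// As an illustration (message texts invented), an OpenAI-style [system, user]
// conversation comes out of getChatRequestBody as a user turn, the dummy model
// turn, then the real user turn:
//
//	{"contents": [
//	  {"role": "user", "parts": [{"text": "Be brief."}]},
//	  {"role": "model", "parts": [{"text": "Okay"}]},
//	  {"role": "user", "parts": [{"text": "Hi"}]}
//	]}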
func (p *GeminiProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
	requestBody := p.getChatRequestBody(request)
	fullRequestURL := p.GetFullRequestURL("generateContent", request.Model)
	headers := p.GetRequestHeaders()
	if request.Stream {
		headers["Accept"] = "text/event-stream"
	}

	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
		var responseText string
		errWithCode, responseText = p.sendStreamRequest(req)
		if errWithCode != nil {
			return
		}

		// usage starts out nil; allocate it before filling in the counts
		usage = &types.Usage{
			PromptTokens:     promptTokens,
			CompletionTokens: common.CountTokenText(responseText, request.Model),
		}
		usage.TotalTokens = promptTokens + usage.CompletionTokens
	} else {
		geminiResponse := &GeminiChatResponse{
			Usage: &types.Usage{
				PromptTokens: promptTokens,
			},
		}
		errWithCode = p.SendRequest(req, geminiResponse, false)
		if errWithCode != nil {
			return
		}

		usage = geminiResponse.Usage
	}
	return
}
// streamResponseGemini2OpenAI converts a Gemini chat response into an OpenAI-style stream chunk
func (p *GeminiProvider) streamResponseGemini2OpenAI(geminiResponse *GeminiChatResponse) *types.ChatCompletionStreamResponse {
	var choice types.ChatCompletionStreamChoice
	choice.Delta.Content = geminiResponse.GetResponseText()
	choice.FinishReason = &base.StopFinishReason

	var response types.ChatCompletionStreamResponse
	response.Object = "chat.completion.chunk"
	response.Model = "gemini"
	response.Choices = []types.ChatCompletionStreamChoice{choice}

	return &response
}
func (p *GeminiProvider) sendStreamRequest(req *http.Request) (*types.OpenAIErrorWithStatusCode, string) {
	defer req.Body.Close()

	// Send the request
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
	}

	if common.IsFailureStatusCode(resp) {
		return common.HandleErrorResp(resp), ""
	}
	defer resp.Body.Close()

	responseText := ""
	dataChan := make(chan string)
	stopChan := make(chan bool)
	scanner := bufio.NewScanner(resp.Body)
	// Split the body into individual lines
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	go func() {
		for scanner.Scan() {
			data := scanner.Text()
			data = strings.TrimSpace(data)
			// Only the `"text": "..."` lines of the streamed JSON carry content
			if !strings.HasPrefix(data, "\"text\": \"") {
				continue
			}
			data = strings.TrimPrefix(data, "\"text\": \"")
			data = strings.TrimSuffix(data, "\"")
			dataChan <- data
		}
		stopChan <- true
	}()
	common.SetEventStreamHeaders(p.Context)
	p.Context.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			// Round-trip through JSON to undo the escaping in the raw line
			// (this is used to prevent the annoying \-related format bug)
			data = fmt.Sprintf("{\"content\": \"%s\"}", data)
			type dummyStruct struct {
				Content string `json:"content"`
			}
			var dummy dummyStruct
			if err := json.Unmarshal([]byte(data), &dummy); err != nil {
				common.SysError("error unmarshalling stream data: " + err.Error())
				return true
			}
			responseText += dummy.Content

			var choice types.ChatCompletionStreamChoice
			choice.Delta.Content = dummy.Content
			response := types.ChatCompletionStreamResponse{
				ID:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
				Object:  "chat.completion.chunk",
				Created: common.GetTimestamp(),
				Model:   "gemini-pro",
				Choices: []types.ChatCompletionStreamChoice{choice},
			}
			jsonResponse, err := json.Marshal(response)
			if err != nil {
				common.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			p.Context.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
			return true
		case <-stopChan:
			p.Context.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}
	})

	return nil, responseText
}
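// The line filter in sendStreamRequest depends on the streaming endpoint
// returning pretty-printed JSON, so each content fragment arrives on its own
// `"text": "..."` line; a response chunk looks roughly like this (abridged;
// layout assumed from the v1 REST streaming format):
//
//	[{
//	  "candidates": [{
//	    "content": {
//	      "parts": [{
//	        "text": "Hello"
//	      }]
//	    },
//	  ...
//
// Every other line is discarded, and the surviving fragment is unescaped by
// round-tripping it through json.Unmarshal.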

providers/gemini/type.go (new file)

@@ -0,0 +1,75 @@
package gemini

import "one-api/types"

type GeminiChatRequest struct {
	Contents         []GeminiChatContent        `json:"contents"`
	SafetySettings   []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
	GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
	Tools            []GeminiChatTools          `json:"tools,omitempty"`
}

type GeminiInlineData struct {
	MimeType string `json:"mimeType"`
	Data     string `json:"data"`
}

type GeminiPart struct {
	Text       string            `json:"text,omitempty"`
	InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}

type GeminiChatContent struct {
	Role  string       `json:"role,omitempty"`
	Parts []GeminiPart `json:"parts"`
}

type GeminiChatSafetySettings struct {
	Category  string `json:"category"`
	Threshold string `json:"threshold"`
}

type GeminiChatTools struct {
	FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}

type GeminiChatGenerationConfig struct {
	Temperature     float64  `json:"temperature,omitempty"`
	TopP            float64  `json:"topP,omitempty"`
	TopK            float64  `json:"topK,omitempty"`
	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
	CandidateCount  int      `json:"candidateCount,omitempty"`
	StopSequences   []string `json:"stopSequences,omitempty"`
}

type GeminiChatResponse struct {
	Candidates     []GeminiChatCandidate    `json:"candidates"`
	PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
	Usage          *types.Usage             `json:"usage,omitempty"`
}

type GeminiChatCandidate struct {
	Content       GeminiChatContent        `json:"content"`
	FinishReason  string                   `json:"finishReason"`
	Index         int64                    `json:"index"`
	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}

type GeminiChatSafetyRating struct {
	Category    string `json:"category"`
	Probability string `json:"probability"`
}

type GeminiChatPromptFeedback struct {
	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}

func (g *GeminiChatResponse) GetResponseText() string {
	if g == nil {
		return ""
	}
	if len(g.Candidates) > 0 && len(g.Candidates[0].Content.Parts) > 0 {
		return g.Candidates[0].Content.Parts[0].Text
	}
	return ""
}


@@ -19,6 +19,10 @@ func (c *OpenAIProviderImageResponseResponse) ResponseHandler(resp *http.Respons
 func (p *OpenAIProvider) ImageGenerationsAction(request *types.ImageRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
+	if !isWithinRange(request.Model, request.N) {
+		return nil, common.StringErrorWrapper("n_not_within_range", "n_not_within_range", http.StatusBadRequest)
+	}
+
 	requestBody, err := p.GetRequestBody(&request, isModelMapped)
 	if err != nil {
 		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
@@ -47,3 +51,13 @@ func (p *OpenAIProvider) ImageGenerationsAction(request *types.ImageRequest, isM
 	return
 }
+
+func isWithinRange(element string, value int) bool {
+	if _, ok := common.DalleGenerationImageAmounts[element]; !ok {
+		return false
+	}
+
+	min := common.DalleGenerationImageAmounts[element][0]
+	max := common.DalleGenerationImageAmounts[element][1]
+	return value >= min && value <= max
+}
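For context, isWithinRange bounds the request's "n" using a per-model table from the common package. Upstream one-api's defaults look like the following sketch (values assumed from upstream; verify against this fork):

// Assumed to mirror upstream one-api's DALL·E limits; illustrative only.
var DalleGenerationImageAmounts = map[string][2]int{
	"dall-e-2": {1, 10},
	"dall-e-3": {1, 1}, // DALL·E 3 only accepts n = 1
}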


@@ -46,6 +46,14 @@ func (p *XunfeiProvider) sendRequest(request *types.ChatCompletionRequest, authU
 		}
 	}
+	if len(xunfeiResponse.Payload.Choices.Text) == 0 {
+		xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{
+			{
+				Content: "",
+			},
+		}
+	}
+
 	xunfeiResponse.Payload.Choices.Text[0].Content = content
 	response := p.responseXunfei2OpenAI(&xunfeiResponse)
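This guard guarantees Payload.Choices.Text has at least one element before the final content is written into Text[0], avoiding an index-out-of-range panic when Xunfei streams back no choices.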