mirror of https://github.com/songquanpeng/one-api.git (synced 2025-12-25 09:15:56 +08:00)

Merge remote-tracking branch 'origin/upstream/main'
@@ -5,14 +5,15 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
 	"one-api/common"
 	"one-api/model"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/gin-gonic/gin"
 )

 func testChannel(channel *model.Channel, request ChatRequest) (err error, openaiErr *OpenAIError) {
@@ -43,16 +44,14 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openaiErr *OpenAIError) {
 	}
 	requestURL := common.ChannelBaseURLs[channel.Type]
 	if channel.Type == common.ChannelTypeAzure {
-		requestURL = fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", channel.GetBaseURL(), request.Model)
+		requestURL = getFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
 	} else {
-		if channel.GetBaseURL() != "" {
-			requestURL = channel.GetBaseURL()
+		if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
+			requestURL = baseURL
 		}
-		requestURL += "/v1/chat/completions"
-	}
-	// for Cloudflare AI gateway: https://github.com/songquanpeng/one-api/pull/639
-	requestURL = strings.Replace(requestURL, "/v1/v1", "/v1", 1)
+
+		requestURL = getFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
+	}
 	jsonData, err := json.Marshal(request)
 	if err != nil {
 		return err, nil
@@ -73,11 +72,18 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openaiErr *OpenAIError) {
 	}
 	defer resp.Body.Close()
 	var response TextResponse
-	err = json.NewDecoder(resp.Body).Decode(&response)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err, nil
 	}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return fmt.Errorf("Error: %s\nResp body: %s", err, body), nil
+	}
 	if response.Usage.CompletionTokens == 0 {
+		if response.Error.Message == "" {
+			response.Error.Message = "补全 tokens 非预期返回 0"
+		}
 		return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message)), &response.Error
 	}
 	return nil, nil
@@ -139,20 +145,32 @@ func TestChannel(c *gin.Context) {
 var testAllChannelsLock sync.Mutex
 var testAllChannelsRunning bool = false

-// disable & notify
-func disableChannel(channelId int, channelName string, reason string) {
+func notifyRootUser(subject string, content string) {
 	if common.RootUserEmail == "" {
 		common.RootUserEmail = model.GetRootUserEmail()
 	}
-	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
-	subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
-	content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
 	err := common.SendEmail(subject, common.RootUserEmail, content)
 	if err != nil {
 		common.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
 	}
 }

+// disable & notify
+func disableChannel(channelId int, channelName string, reason string) {
+	model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled)
+	subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
+	content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
+	notifyRootUser(subject, content)
+}
+
+// enable & notify
+func enableChannel(channelId int, channelName string) {
+	model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled)
+	subject := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
+	content := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
+	notifyRootUser(subject, content)
+}
+
 func testAllChannels(notify bool) error {
 	if common.RootUserEmail == "" {
 		common.RootUserEmail = model.GetRootUserEmail()
@@ -175,20 +193,21 @@ func testAllChannels(notify bool) error {
 	}
 	go func() {
 		for _, channel := range channels {
-			if channel.Status != common.ChannelStatusEnabled {
-				continue
-			}
+			isChannelEnabled := channel.Status == common.ChannelStatusEnabled
 			tik := time.Now()
 			err, openaiErr := testChannel(channel, *testRequest)
 			tok := time.Now()
 			milliseconds := tok.Sub(tik).Milliseconds()
-			if milliseconds > disableThreshold {
+			if isChannelEnabled && milliseconds > disableThreshold {
 				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
 				disableChannel(channel.Id, channel.Name, err.Error())
 			}
-			if shouldDisableChannel(openaiErr, -1) {
+			if isChannelEnabled && shouldDisableChannel(openaiErr, -1) {
 				disableChannel(channel.Id, channel.Name, err.Error())
 			}
+			if !isChannelEnabled && shouldEnableChannel(err, openaiErr) {
+				enableChannel(channel.Id, channel.Name)
+			}
 			channel.UpdateResponseTime(milliseconds)
 			time.Sleep(common.RequestInterval)
 		}
@@ -55,12 +55,21 @@ func init() {
 	// https://platform.openai.com/docs/models/model-endpoint-compatibility
 	openAIModels = []OpenAIModels{
 		{
-			Id:         "dall-e",
+			Id:         "dall-e-2",
 			Object:     "model",
 			Created:    1677649963,
 			OwnedBy:    "openai",
 			Permission: permission,
-			Root:       "dall-e",
+			Root:       "dall-e-2",
 			Parent:     nil,
 		},
+		{
+			Id:         "dall-e-3",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "dall-e-3",
+			Parent:     nil,
+		},
 		{
@@ -72,6 +81,42 @@ func init() {
 			Root:       "whisper-1",
 			Parent:     nil,
 		},
+		{
+			Id:         "tts-1",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-1106",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-1106",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-hd",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-hd",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-hd-1106",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-hd-1106",
+			Parent:     nil,
+		},
 		{
 			Id:         "gpt-3.5-turbo",
 			Object:     "model",
@@ -315,6 +360,24 @@ func init() {
 			Root:       "claude-2",
 			Parent:     nil,
 		},
+		{
+			Id:         "claude-2.1",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "anthropic",
+			Permission: permission,
+			Root:       "claude-2.1",
+			Parent:     nil,
+		},
+		{
+			Id:         "claude-2.0",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "anthropic",
+			Permission: permission,
+			Root:       "claude-2.0",
+			Parent:     nil,
+		},
 		{
 			Id:         "ERNIE-Bot",
 			Object:     "model",
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"one-api/common"
 	"strconv"
@@ -49,23 +48,8 @@ type AIProxyLibraryStreamResponse struct {

 func requestOpenAI2AIProxyLibrary(request GeneralOpenAIRequest) *AIProxyLibraryRequest {
 	query := ""
-
-	if request.MessagesLen() != 0 {
-		if msgs, err := request.TextMessages(); err == nil {
-			query = msgs[len(msgs)-1].Content
-		} else if msgs, err := request.VisionMessages(); err == nil {
-			lastMsg := msgs[len(msgs)-1]
-			if len(lastMsg.Content) != 0 {
-				for i := range lastMsg.Content {
-					if lastMsg.Content[i].Type == OpenaiVisionMessageContentTypeText {
-						query = lastMsg.Content[i].Text
-						break
-					}
-				}
-			}
-		} else {
-			log.Panicf("unknown message type: %T", msgs)
-		}
+	if len(request.Messages) != 0 {
+		query = request.Messages[len(request.Messages)-1].StringContent()
 	}
-
 	return &AIProxyLibraryRequest{
@@ -1,6 +1,7 @@
 package controller

 import (
+	"bufio"
 	"bytes"
 	"context"
 	"encoding/json"
@@ -11,6 +12,7 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/model"
+	"strings"
 )

 func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
@@ -21,16 +23,41 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	channelId := c.GetInt("channel_id")
 	userId := c.GetInt("id")
 	group := c.GetString("group")
+	tokenName := c.GetString("token_name")
+
+	var ttsRequest TextToSpeechRequest
+	if relayMode == RelayModeAudioSpeech {
+		// Read JSON
+		err := common.UnmarshalBodyReusable(c, &ttsRequest)
+		// Check if JSON is valid
+		if err != nil {
+			return errorWrapper(err, "invalid_json", http.StatusBadRequest)
+		}
+		audioModel = ttsRequest.Model
+		// Check if text is too long 4096
+		if len(ttsRequest.Input) > 4096 {
+			return errorWrapper(errors.New("input is too long (over 4096 characters)"), "text_too_long", http.StatusBadRequest)
+		}
+	}

-	preConsumedTokens := common.PreConsumedQuota
 	modelRatio := common.GetModelRatio(audioModel)
 	groupRatio := common.GetGroupRatio(group)
 	ratio := modelRatio * groupRatio
-	preConsumedQuota := int(float64(preConsumedTokens) * ratio)
+	var quota int
+	var preConsumedQuota int
+	switch relayMode {
+	case RelayModeAudioSpeech:
+		preConsumedQuota = int(float64(len(ttsRequest.Input)) * ratio)
+		quota = preConsumedQuota
+	default:
+		preConsumedQuota = int(float64(common.PreConsumedQuota) * ratio)
+	}
 	userQuota, err := model.CacheGetUserQuota(userId)
 	if err != nil {
 		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
 	}

 	// Check if user quota is enough
 	if userQuota-preConsumedQuota < 0 {
 		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
 	}
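Note (reviewer sketch, not part of the commit): the settlement math introduced above, with invented values — speech requests pre-consume quota proportional to input length and settle at exactly that amount, while transcription pre-consumes a flat amount and settles later against the counted tokens, refunding via a negative delta:

	package main

	import "fmt"

	func main() {
		ratio := 0.75 // stands in for modelRatio * groupRatio; value is hypothetical

		// TTS (RelayModeAudioSpeech): quota is fixed up front by input length.
		input := "Hello, one-api!" // 15 characters
		ttsPre := int(float64(len(input)) * ratio)
		fmt.Println(ttsPre, ttsPre-ttsPre) // 11 0 — nothing left to settle

		// Transcription: flat pre-consumption, settled after token counting.
		pre := int(float64(500) * ratio) // 500 stands in for common.PreConsumedQuota
		finalQuota := 220                // what countTokenText(text, audioModel) might return
		fmt.Println(pre, finalQuota-pre) // 375 -155 — the negative delta is refunded
	}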
@@ -70,13 +97,34 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}

 	fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
-	requestBody := c.Request.Body
+	if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+		// https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
+		apiVersion := GetAPIVersion(c)
+		fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", baseURL, audioModel, apiVersion)
+	}
+
+	requestBody := &bytes.Buffer{}
+	_, err = io.Copy(requestBody, c.Request.Body)
+	if err != nil {
+		return errorWrapper(err, "new_request_body_failed", http.StatusInternalServerError)
+	}
+	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody.Bytes()))
+	responseFormat := c.DefaultPostForm("response_format", "json")

 	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
 	if err != nil {
 		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
 	}
-	req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
+
+	if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+		// https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
+		apiKey := c.Request.Header.Get("Authorization")
+		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
+		req.Header.Set("api-key", apiKey)
+		req.ContentLength = c.Request.ContentLength
+	} else {
+		req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
+	}
 	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
 	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
@@ -93,47 +141,65 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	if err != nil {
 		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
 	}
-	var audioResponse AudioResponse
+
+	if relayMode != RelayModeAudioSpeech {
+		responseBody, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+		}
+		err = resp.Body.Close()
+		if err != nil {
+			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+		}
+
+		var openAIErr TextResponse
+		if err = json.Unmarshal(responseBody, &openAIErr); err == nil {
+			if openAIErr.Error.Message != "" {
+				return errorWrapper(fmt.Errorf("type %s, code %v, message %s", openAIErr.Error.Type, openAIErr.Error.Code, openAIErr.Error.Message), "request_error", http.StatusInternalServerError)
+			}
+		}
+
+		var text string
+		switch responseFormat {
+		case "json":
+			text, err = getTextFromJSON(responseBody)
+		case "text":
+			text, err = getTextFromText(responseBody)
+		case "srt":
+			text, err = getTextFromSRT(responseBody)
+		case "verbose_json":
+			text, err = getTextFromVerboseJSON(responseBody)
+		case "vtt":
+			text, err = getTextFromVTT(responseBody)
+		default:
+			return errorWrapper(errors.New("unexpected_response_format"), "unexpected_response_format", http.StatusInternalServerError)
+		}
+		if err != nil {
+			return errorWrapper(err, "get_text_from_body_err", http.StatusInternalServerError)
+		}
+		quota = countTokenText(text, audioModel)
+		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	}
 	if resp.StatusCode != http.StatusOK {
+		if preConsumedQuota > 0 {
+			// we need to roll back the pre-consumed quota
+			defer func(ctx context.Context) {
+				go func() {
+					// negative means add quota back for token & user
+					err := model.PostConsumeTokenQuota(tokenId, -preConsumedQuota)
+					if err != nil {
+						common.LogError(ctx, fmt.Sprintf("error rollback pre-consumed quota: %s", err.Error()))
+					}
+				}()
+			}(c.Request.Context())
+		}
 		return relayErrorHandler(resp)
 	}
+	quotaDelta := quota - preConsumedQuota
 	defer func(ctx context.Context) {
-		go func() {
-			quota := countTokenText(audioResponse.Text, audioModel)
-			quotaDelta := quota - preConsumedQuota
-			err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
-			if err != nil {
-				common.SysError("error consuming token remain quota: " + err.Error())
-			}
-			err = model.CacheUpdateUserQuota(userId)
-			if err != nil {
-				common.SysError("error update user quota cache: " + err.Error())
-			}
-			if quota != 0 {
-				tokenName := c.GetString("token_name")
-				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, 0, 0, audioModel, tokenName, quota, logContent)
-				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-				channelId := c.GetInt("channel_id")
-				model.UpdateChannelUsedQuota(channelId, quota)
-			}
-		}()
+		go postConsumeQuota(ctx, tokenId, quotaDelta, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
 	}(c.Request.Context())

-	responseBody, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-	}
-	err = json.Unmarshal(responseBody, &audioResponse)
-	if err != nil {
-		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
-	}
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-
 	for k, v := range resp.Header {
 		c.Writer.Header().Set(k, v[0])
 	}
@@ -149,3 +215,48 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}
 	return nil
 }
+
+func getTextFromVTT(body []byte) (string, error) {
+	return getTextFromSRT(body)
+}
+
+func getTextFromVerboseJSON(body []byte) (string, error) {
+	var whisperResponse WhisperVerboseJSONResponse
+	if err := json.Unmarshal(body, &whisperResponse); err != nil {
+		return "", fmt.Errorf("unmarshal_response_body_failed err :%w", err)
+	}
+	return whisperResponse.Text, nil
+}
+
+func getTextFromSRT(body []byte) (string, error) {
+	scanner := bufio.NewScanner(strings.NewReader(string(body)))
+	var builder strings.Builder
+	var textLine bool
+	for scanner.Scan() {
+		line := scanner.Text()
+		if textLine {
+			builder.WriteString(line)
+			textLine = false
+			continue
+		} else if strings.Contains(line, "-->") {
+			textLine = true
+			continue
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return "", err
+	}
+	return builder.String(), nil
+}
+
+func getTextFromText(body []byte) (string, error) {
+	return strings.TrimSuffix(string(body), "\n"), nil
+}
+
+func getTextFromJSON(body []byte) (string, error) {
+	var whisperResponse WhisperJSONResponse
+	if err := json.Unmarshal(body, &whisperResponse); err != nil {
+		return "", fmt.Errorf("unmarshal_response_body_failed err :%w", err)
+	}
+	return whisperResponse.Text, nil
+}
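Note (reviewer sketch): a quick usage example for the SRT helper added above, on a made-up two-cue SRT. The scanner keeps only the first text line after each "-->" timestamp line, so a multi-line cue would silently lose its later lines — worth keeping in mind since the result only feeds countTokenText:

	package main

	import "fmt"

	// assumes getTextFromSRT from the diff above is in the same package
	func main() {
		srt := "1\n00:00:00,000 --> 00:00:02,000\nHello there\n\n" +
			"2\n00:00:02,000 --> 00:00:04,000\nGeneral Kenobi\n"
		text, err := getTextFromSRT([]byte(srt))
		fmt.Printf("%q %v\n", text, err) // "Hello thereGeneral Kenobi" <nil>
	}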
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"one-api/common"
 	"strings"
@@ -67,18 +66,20 @@ func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest {
 	}
 	prompt := ""

-	messages, err := textRequest.TextMessages()
-	if err != nil {
-		log.Panicf("invalid message type: %T", textRequest.Messages)
-	}
+	// messages, err := textRequest.TextMessages()
+	// if err != nil {
+	// 	log.Panicf("invalid message type: %T", textRequest.Messages)
+	// }

-	for _, message := range messages {
+	for _, message := range textRequest.Messages {
 		if message.Role == "user" {
 			prompt += fmt.Sprintf("\n\nHuman: %s", message.Content)
 		} else if message.Role == "assistant" {
 			prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
 		} else if message.Role == "system" {
-			prompt += fmt.Sprintf("\n\nSystem: %s", message.Content)
+			if prompt == "" {
+				prompt = message.StringContent()
+			}
 		}
 	}
 	prompt += "\n\nAssistant:"
@@ -6,44 +6,80 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
 	"one-api/common"
 	"one-api/model"
 	"strings"
+
+	"github.com/gin-gonic/gin"
 )

+func isWithinRange(element string, value int) bool {
+	if _, ok := common.DalleGenerationImageAmounts[element]; !ok {
+		return false
+	}
+
+	min := common.DalleGenerationImageAmounts[element][0]
+	max := common.DalleGenerationImageAmounts[element][1]
+
+	return value >= min && value <= max
+}
+
 func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
-	imageModel := "dall-e"
+	imageModel := "dall-e-2"
 	imageSize := "1024x1024"

 	tokenId := c.GetInt("token_id")
 	channelType := c.GetInt("channel")
 	channelId := c.GetInt("channel_id")
 	userId := c.GetInt("id")
-	consumeQuota := c.GetBool("consume_quota")
 	group := c.GetString("group")

 	var imageRequest ImageRequest
-	if consumeQuota {
-		err := common.UnmarshalBodyReusable(c, &imageRequest)
-		if err != nil {
-			return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
-		}
+	err := common.UnmarshalBodyReusable(c, &imageRequest)
+	if err != nil {
+		return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
 	}

+	// Size validation
+	if imageRequest.Size != "" {
+		imageSize = imageRequest.Size
+	}
+
+	// Model validation
+	if imageRequest.Model != "" {
+		imageModel = imageRequest.Model
+	}
+
+	imageCostRatio, hasValidSize := common.DalleSizeRatios[imageModel][imageSize]
+
+	// Check if model is supported
+	if hasValidSize {
+		if imageRequest.Quality == "hd" && imageModel == "dall-e-3" {
+			if imageSize == "1024x1024" {
+				imageCostRatio *= 2
+			} else {
+				imageCostRatio *= 1.5
+			}
+		}
+	} else {
+		return errorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
+	}
+
 	// Prompt validation
 	if imageRequest.Prompt == "" {
-		return errorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest)
+		return errorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
 	}

-	// Not "256x256", "512x512", or "1024x1024"
-	if imageRequest.Size != "" && imageRequest.Size != "256x256" && imageRequest.Size != "512x512" && imageRequest.Size != "1024x1024" {
-		return errorWrapper(errors.New("size must be one of 256x256, 512x512, or 1024x1024"), "invalid_field_value", http.StatusBadRequest)
+	// Check prompt length
+	if len(imageRequest.Prompt) > common.DalleImagePromptLengthLimitations[imageModel] {
+		return errorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
 	}

-	// N should between 1 and 10
-	if imageRequest.N != 0 && (imageRequest.N < 1 || imageRequest.N > 10) {
-		return errorWrapper(errors.New("n must be between 1 and 10"), "invalid_field_value", http.StatusBadRequest)
+	// Number of generated images validation
+	if isWithinRange(imageModel, imageRequest.N) == false {
+		return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
 	}

 	// map model name
@@ -66,8 +102,15 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		baseURL = c.GetString("base_url")
 	}
 	fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
+	if channelType == common.ChannelTypeAzure && relayMode == RelayModeImagesGenerations {
+		// https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
+		apiVersion := GetAPIVersion(c)
+		// https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-06-01-preview
+		fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", baseURL, imageModel, apiVersion)
+	}

 	var requestBody io.Reader
-	if isModelMapped {
+	if isModelMapped || channelType == common.ChannelTypeAzure { // make Azure channel request body
 		jsonStr, err := json.Marshal(imageRequest)
 		if err != nil {
 			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
@@ -82,18 +125,9 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	ratio := modelRatio * groupRatio
 	userQuota, err := model.CacheGetUserQuota(userId)

-	sizeRatio := 1.0
-	// Size
-	if imageRequest.Size == "256x256" {
-		sizeRatio = 1
-	} else if imageRequest.Size == "512x512" {
-		sizeRatio = 1.125
-	} else if imageRequest.Size == "1024x1024" {
-		sizeRatio = 1.25
-	}
-	quota := int(ratio*sizeRatio*1000) * imageRequest.N
+	quota := int(ratio*imageCostRatio*1000) * imageRequest.N

-	if consumeQuota && userQuota-quota < 0 {
+	if userQuota-quota < 0 {
 		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
 	}
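Note (reviewer sketch): a worked example of the new cost formula, with invented ratios — the real values come from common.GetModelRatio/GetGroupRatio and common.DalleSizeRatios:

	package main

	import "fmt"

	func main() {
		ratio := 2.0          // modelRatio * groupRatio, hypothetical
		imageCostRatio := 1.0 // say DalleSizeRatios["dall-e-3"]["1024x1024"]
		imageCostRatio *= 2   // quality "hd" at 1024x1024 doubles it (see validation above)
		n := 2
		quota := int(ratio*imageCostRatio*1000) * n
		fmt.Println(quota) // 8000
	}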
@@ -101,7 +135,13 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	if err != nil {
 		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
 	}
-	req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
+	token := c.Request.Header.Get("Authorization")
+	if channelType == common.ChannelTypeAzure { // Azure authentication
+		token = strings.TrimPrefix(token, "Bearer ")
+		req.Header.Set("api-key", token)
+	} else {
+		req.Header.Set("Authorization", token)
+	}

 	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
 	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
@@ -122,43 +162,39 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	var textResponse ImageResponse

 	defer func(ctx context.Context) {
-		if consumeQuota {
-			err := model.PostConsumeTokenQuota(tokenId, quota)
-			if err != nil {
-				common.SysError("error consuming token remain quota: " + err.Error())
-			}
-			err = model.CacheUpdateUserQuota(userId)
-			if err != nil {
-				common.SysError("error update user quota cache: " + err.Error())
-			}
-			if quota != 0 {
-				tokenName := c.GetString("token_name")
-				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent)
-				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-				channelId := c.GetInt("channel_id")
-				model.UpdateChannelUsedQuota(channelId, quota)
-			}
+		err := model.PostConsumeTokenQuota(tokenId, quota)
+		if err != nil {
+			common.SysError("error consuming token remain quota: " + err.Error())
+		}
+		err = model.CacheUpdateUserQuota(userId)
+		if err != nil {
+			common.SysError("error update user quota cache: " + err.Error())
+		}
+		if quota != 0 {
+			tokenName := c.GetString("token_name")
+			logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+			model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent)
+			model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
+			channelId := c.GetInt("channel_id")
+			model.UpdateChannelUsedQuota(channelId, quota)
 		}
 	}(c.Request.Context())

-	if consumeQuota {
-		responseBody, err := io.ReadAll(resp.Body)
-		if err != nil {
-			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
-		}
-		err = resp.Body.Close()
-		if err != nil {
-			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-		}
-		err = json.Unmarshal(responseBody, &textResponse)
-		if err != nil {
-			return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
-		}
-
-		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	responseBody, err := io.ReadAll(resp.Body)
+
+	if err != nil {
+		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+	}
+	err = json.Unmarshal(responseBody, &textResponse)
+	if err != nil {
+		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
 	}
+
+	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))

 	for k, v := range resp.Header {
 		c.Writer.Header().Set(k, v[0])
@@ -88,30 +88,29 @@ func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*OpenAIErrorWithStatusCode, string) {
 	return nil, responseText
 }

-func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
+func openaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
 	var textResponse TextResponse
-	if consumeQuota {
-		responseBody, err := io.ReadAll(resp.Body)
-		if err != nil {
-			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-		}
-		err = resp.Body.Close()
-		if err != nil {
-			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-		}
-		err = json.Unmarshal(responseBody, &textResponse)
-		if err != nil {
-			return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-		}
-		if textResponse.Error.Type != "" {
-			return &OpenAIErrorWithStatusCode{
-				OpenAIError: textResponse.Error,
-				StatusCode:  resp.StatusCode,
-			}, nil
-		}
-		// Reset response body
-		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+	}
+	err = json.Unmarshal(responseBody, &textResponse)
+	if err != nil {
+		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+	}
+	if textResponse.Error.Type != "" {
+		return &OpenAIErrorWithStatusCode{
+			OpenAIError: textResponse.Error,
+			StatusCode:  resp.StatusCode,
+		}, nil
 	}
+	// Reset response body
+	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))

 	// We shouldn't set the header before we parse the response body, because the parse part may fail.
 	// And then we will have to send an error response, but in this case, the header has already been set.
 	// So the httpClient will be confused by the response.
@@ -120,7 +119,7 @@ func openaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
 		c.Writer.Header().Set(k, v[0])
 	}
 	c.Writer.WriteHeader(resp.StatusCode)
-	_, err := io.Copy(c.Writer, resp.Body)
+	_, err = io.Copy(c.Writer, resp.Body)
 	if err != nil {
 		return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
 	}
@@ -132,7 +131,7 @@ func openaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
 	if textResponse.Usage.TotalTokens == 0 {
 		completionTokens := 0
 		for _, choice := range textResponse.Choices {
-			completionTokens += countTokenText(choice.Message.Content, model)
+			completionTokens += countTokenText(choice.Message.StringContent(), model)
 		}
 		textResponse.Usage = Usage{
 			PromptTokens: promptTokens,
@@ -51,14 +51,11 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	channelId := c.GetInt("channel_id")
 	tokenId := c.GetInt("token_id")
 	userId := c.GetInt("id")
-	consumeQuota := c.GetBool("consume_quota")
 	group := c.GetString("group")
 	var textRequest GeneralOpenAIRequest
-	if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM {
-		err := common.UnmarshalBodyReusable(c, &textRequest)
-		if err != nil {
-			return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
-		}
+	err := common.UnmarshalBodyReusable(c, &textRequest)
+	if err != nil {
+		return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
 	}
 	if relayMode == RelayModeModerations && textRequest.Model == "" {
 		textRequest.Model = "text-moderation-latest"
@@ -76,7 +73,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
 		}
 	case RelayModeChatCompletions:
-		if textRequest.Messages == nil || textRequest.MessagesLen() == 0 {
+		if len(textRequest.Messages) == 0 {
 			return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
 		}
 	case RelayModeEmbeddings:
@@ -132,11 +129,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	case APITypeOpenAI:
 		if channelType == common.ChannelTypeAzure {
 			// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
-			query := c.Request.URL.Query()
-			apiVersion := query.Get("api-version")
-			if apiVersion == "" {
-				apiVersion = c.GetString("api_version")
-			}
+			apiVersion := GetAPIVersion(c)
 			requestURL := strings.Split(requestURL, "?")[0]
 			requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
 			baseURL = c.GetString("base_url")
@@ -147,7 +140,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			model_ = strings.TrimSuffix(model_, "-0301")
 			model_ = strings.TrimSuffix(model_, "-0314")
 			model_ = strings.TrimSuffix(model_, "-0613")
-			fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
+
+			requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
+			fullRequestURL = getFullRequestURL(baseURL, requestURL, channelType)
 		}
 	case APITypeClaude:
 		fullRequestURL = "https://api.anthropic.com/v1/complete"
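Note (reviewer sketch): to make the Azure path construction above concrete — the resource name is invented, and the api-version would come from GetAPIVersion(c). A model with a date suffix is trimmed to the base name and routed to the deployment of that name:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		baseURL := "https://example-resource.openai.azure.com" // hypothetical
		model_ := "gpt-3.5-turbo-0613"
		model_ = strings.TrimSuffix(model_, "-0301")
		model_ = strings.TrimSuffix(model_, "-0314")
		model_ = strings.TrimSuffix(model_, "-0613")
		task := "chat/completions?api-version=2023-05-15"
		fmt.Println(baseURL + fmt.Sprintf("/openai/deployments/%s/%s", model_, task))
		// https://example-resource.openai.azure.com/openai/deployments/gpt-3.5-turbo/chat/completions?api-version=2023-05-15
	}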
@@ -182,19 +177,19 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		apiKey := c.Request.Header.Get("Authorization")
 		apiKey = strings.TrimPrefix(apiKey, "Bearer ")
 		fullRequestURL += "?key=" + apiKey
-	case APITypeZhipu:
-		method := "invoke"
-		if textRequest.Stream {
-			method = "sse-invoke"
-		}
-		fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
-	case APITypeAli:
-		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
-		if relayMode == RelayModeEmbeddings {
-			fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
-		}
-	case APITypeTencent:
-		fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
+	// case APITypeZhipu:
+	// 	method := "invoke"
+	// 	if textRequest.Stream {
+	// 		method = "sse-invoke"
+	// 	}
+	// 	fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method)
+	// case APITypeAli:
+	// 	fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
+	// 	if relayMode == RelayModeEmbeddings {
+	// 		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
+	// 	}
+	// case APITypeTencent:
+	// 	fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions"
 	case APITypeAIProxyLibrary:
 		fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
 	}
@@ -202,20 +197,21 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	var completionTokens int
 	switch relayMode {
 	case RelayModeChatCompletions:
+		promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
-		// first try to parse as text messages
-		if messages, err := textRequest.TextMessages(); err != nil {
-			// then try to parse as vision messages
-			if messages, err := textRequest.VisionMessages(); err != nil {
-				return errorWrapper(err, "parse_text_messages_failed", http.StatusBadRequest)
-			} else {
-				// vision message
-				if promptTokens, err = countVisonTokenMessages(messages, textRequest.Model); err != nil {
-					return errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
-				}
-			}
-		} else {
-			promptTokens = countTokenMessages(messages, textRequest.Model)
-		}
+		// first try to parse as text messages
+		// if messages, err := textRequest.TextMessages(); err != nil {
+		// 	// then try to parse as vision messages
+		// 	if messages, err := textRequest.VisionMessages(); err != nil {
+		// 		return errorWrapper(err, "parse_text_messages_failed", http.StatusBadRequest)
+		// 	} else {
+		// 		// vision message
+		// 		if promptTokens, err = countVisonTokenMessages(messages, textRequest.Model); err != nil {
+		// 			return errorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
+		// 		}
+		// 	}
+		// } else {
+		// 	promptTokens = countTokenMessages(messages, textRequest.Model)
+		// }
 	case RelayModeCompletions:
 		promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
 	case RelayModeModerations:
@@ -246,7 +242,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		preConsumedQuota = 0
 		common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", userId, userQuota))
 	}
-	if consumeQuota && preConsumedQuota > 0 {
+	if preConsumedQuota > 0 {
 		err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
 		if err != nil {
 			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
@@ -378,8 +374,13 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		// if textRequest.Stream {
 		// 	req.Header.Set("X-DashScope-SSE", "enable")
 		// }
+		// if c.GetString("plugin") != "" {
+		// 	req.Header.Set("X-DashScope-Plugin", c.GetString("plugin"))
+		// }
+	// case APITypeTencent:
+	// 	req.Header.Set("Authorization", apiKey)
 	case APITypePaLM:
 		// do not set Authorization header
 	default:
 		req.Header.Set("Authorization", "Bearer "+apiKey)
 	}
@@ -423,37 +424,36 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	defer func(ctx context.Context) {
 		// c.Writer.Flush()
 		go func() {
-			if consumeQuota {
-				quota := 0
-				completionRatio := common.GetCompletionRatio(textRequest.Model)
-				promptTokens = textResponse.Usage.PromptTokens
-				completionTokens = textResponse.Usage.CompletionTokens
-				quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
-				if ratio != 0 && quota <= 0 {
-					quota = 1
-				}
-				totalTokens := promptTokens + completionTokens
-				if totalTokens == 0 {
-					// in this case, must be some error happened
-					// we cannot just return, because we may have to return the pre-consumed quota
-					quota = 0
-				}
-				quotaDelta := quota - preConsumedQuota
-				err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
-				if err != nil {
-					common.LogError(ctx, "error consuming token remain quota: "+err.Error())
-				}
-				err = model.CacheUpdateUserQuota(userId)
-				if err != nil {
-					common.LogError(ctx, "error update user quota cache: "+err.Error())
-				}
-				if quota != 0 {
-					logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-					model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
-					model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-					model.UpdateChannelUsedQuota(channelId, quota)
-				}
+			quota := 0
+			completionRatio := common.GetCompletionRatio(textRequest.Model)
+			promptTokens = textResponse.Usage.PromptTokens
+			completionTokens = textResponse.Usage.CompletionTokens
+			quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
+			if ratio != 0 && quota <= 0 {
+				quota = 1
+			}
+			totalTokens := promptTokens + completionTokens
+			if totalTokens == 0 {
+				// in this case, must be some error happened
+				// we cannot just return, because we may have to return the pre-consumed quota
+				quota = 0
+			}
+			quotaDelta := quota - preConsumedQuota
+			err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
+			if err != nil {
+				common.LogError(ctx, "error consuming token remain quota: "+err.Error())
+			}
+			err = model.CacheUpdateUserQuota(userId)
+			if err != nil {
+				common.LogError(ctx, "error update user quota cache: "+err.Error())
+			}
+			if quota != 0 {
+				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+				model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
+				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
+				model.UpdateChannelUsedQuota(channelId, quota)
 			}
 		}()
 	}(c.Request.Context())
 	switch apiType {
@@ -467,7 +467,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
 			return nil
 		} else {
-			err, usage := openaiHandler(c, resp, consumeQuota, promptTokens, textRequest.Model)
+			err, usage := openaiHandler(c, resp, promptTokens, textRequest.Model)
 			if err != nil {
 				return err
 			}
@@ -2,6 +2,7 @@ package controller

 import (
 	"bytes"
+	"context"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
@@ -11,6 +12,8 @@ import (
 	"math"
 	"net/http"
 	"one-api/common"
+	"one-api/common/image"
+	"one-api/model"
 	"strconv"
 	"strings"
@@ -138,7 +141,33 @@ func countTokenMessages(messages []Message, model string) int {
 	tokenNum := 0
 	for _, message := range messages {
 		tokenNum += tokensPerMessage
-		tokenNum += getTokenNum(tokenEncoder, message.Content)
+		switch v := message.Content.(type) {
+		case string:
+			tokenNum += getTokenNum(tokenEncoder, v)
+		case []any:
+			for _, it := range v {
+				m := it.(map[string]any)
+				switch m["type"] {
+				case "text":
+					tokenNum += getTokenNum(tokenEncoder, m["text"].(string))
+				case "image_url":
+					imageUrl, ok := m["image_url"].(map[string]any)
+					if ok {
+						url := imageUrl["url"].(string)
+						detail := ""
+						if imageUrl["detail"] != nil {
+							detail = imageUrl["detail"].(string)
+						}
+						imageTokens, err := countImageTokens(url, detail)
+						if err != nil {
+							common.SysError("error counting image tokens: " + err.Error())
+						} else {
+							tokenNum += imageTokens
+						}
+					}
+				}
+			}
+		}
 		tokenNum += getTokenNum(tokenEncoder, message.Role)
 		if message.Name != nil {
 			tokenNum += tokensPerName
@@ -196,13 +225,81 @@ func countVisonTokenMessages(messages []VisionMessage, model string) (int, error) {
 	return tokenNum, nil
 }

+const (
+	lowDetailCost         = 85
+	highDetailCostPerTile = 170
+	additionalCost        = 85
+)
+
+// https://platform.openai.com/docs/guides/vision/calculating-costs
+// https://github.com/openai/openai-cookbook/blob/05e3f9be4c7a2ae7ecf029a7c32065b024730ebe/examples/How_to_count_tokens_with_tiktoken.ipynb
+func countImageTokens(url string, detail string) (_ int, err error) {
+	var fetchSize = true
+	var width, height int
+	// Reference: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
+	// detail == "auto" is undocumented on how it works; the docs just say the model will
+	// look at the image input size and decide whether to use the low or high setting.
+	// According to the official guide, "low" disables the high-res model and only
+	// receives a low-res 512px x 512px version of the image, indicating that an image
+	// is treated as low-res when it is smaller than 512px x 512px; we could then assume
+	// that anything larger is treated as high-res, giving the following logic:
+	// if detail == "" || detail == "auto" {
+	// 	width, height, err = image.GetImageSize(url)
+	// 	if err != nil {
+	// 		return 0, err
+	// 	}
+	// 	fetchSize = false
+	// 	// not sure if this is correct
+	// 	if width > 512 || height > 512 {
+	// 		detail = "high"
+	// 	} else {
+	// 		detail = "low"
+	// 	}
+	// }
+
+	// However, in my test, it seems to always behave the same as "high".
+	// The following image, which is 125x50, is still treated as high-res, taking
+	// 255 tokens in the response of the non-stream chat completion API.
+	// https://upload.wikimedia.org/wikipedia/commons/1/10/18_Infantry_Division_Messina.jpg
+	if detail == "" || detail == "auto" {
+		// assumed by test, not sure if this is correct
+		detail = "high"
+	}
+	switch detail {
+	case "low":
+		return lowDetailCost, nil
+	case "high":
+		if fetchSize {
+			width, height, err = image.GetImageSize(url)
+			if err != nil {
+				return 0, err
+			}
+		}
+		if width > 2048 || height > 2048 { // max(width, height) > 2048
+			ratio := float64(2048) / math.Max(float64(width), float64(height))
+			width = int(float64(width) * ratio)
+			height = int(float64(height) * ratio)
+		}
+		if width > 768 && height > 768 { // min(width, height) > 768
+			ratio := float64(768) / math.Min(float64(width), float64(height))
+			width = int(float64(width) * ratio)
+			height = int(float64(height) * ratio)
+		}
+		numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
+		result := numSquares*highDetailCostPerTile + additionalCost
+		return result, nil
+	default:
+		return 0, errors.New("invalid detail option")
+	}
+}
+
 func countTokenInput(input any, model string) int {
-	switch input.(type) {
+	switch v := input.(type) {
 	case string:
-		return countTokenText(input.(string), model)
+		return countTokenText(v, model)
 	case []string:
 		text := ""
-		for _, s := range input.([]string) {
+		for _, s := range v {
 			text += s
 		}
 		return countTokenText(text, model)
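Note (reviewer sketch): the high-detail tile math above, checked on two inputs. A 2048x4096 image is first scaled into the 2048 box (1024x2048), then its short side is scaled to 768 (768x1536), giving ceil(768/512)*ceil(1536/512) = 2*3 = 6 tiles, i.e. 6*170 + 85 = 1105 tokens; a small image stays at one tile, matching the 255-token observation in the comment above:

	package main

	import (
		"fmt"
		"math"
	)

	func highDetailTokens(width, height int) int {
		if width > 2048 || height > 2048 { // fit into the 2048 box
			r := 2048 / math.Max(float64(width), float64(height))
			width, height = int(float64(width)*r), int(float64(height)*r)
		}
		if width > 768 && height > 768 { // scale the short side to 768
			r := 768 / math.Min(float64(width), float64(height))
			width, height = int(float64(width)*r), int(float64(height)*r)
		}
		tiles := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
		return tiles*170 + 85
	}

	func main() {
		fmt.Println(highDetailTokens(2048, 4096)) // 1105
		fmt.Println(highDetailTokens(125, 50))    // 255
	}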
@@ -243,6 +340,19 @@ func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
 	return false
 }

+func shouldEnableChannel(err error, openAIErr *OpenAIError) bool {
+	if !common.AutomaticEnableChannelEnabled {
+		return false
+	}
+	if err != nil {
+		return false
+	}
+	if openAIErr != nil {
+		return false
+	}
+	return true
+}
+
 func setEventStreamHeaders(c *gin.Context) {
 	c.Writer.Header().Set("Content-Type", "text/event-stream")
 	c.Writer.Header().Set("Cache-Control", "no-cache")
@@ -280,10 +390,45 @@ func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {

 func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
 	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
-	if channelType == common.ChannelTypeOpenAI {
-		if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
-			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
-		}
-	}
+
+	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
+		switch channelType {
+		case common.ChannelTypeOpenAI:
+			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
+		case common.ChannelTypeAzure:
+			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/openai/deployments"))
+		}
+	}
 	return fullRequestURL
 }

+func postConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
+	// quotaDelta is remaining quota to be consumed
+	err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
+	if err != nil {
+		common.SysError("error consuming token remain quota: " + err.Error())
+	}
+	err = model.CacheUpdateUserQuota(userId)
+	if err != nil {
+		common.SysError("error update user quota cache: " + err.Error())
+	}
+	// totalQuota is total quota consumed
+	if totalQuota != 0 {
+		logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+		model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
+		model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
+		model.UpdateChannelUsedQuota(channelId, totalQuota)
+	}
+	if totalQuota <= 0 {
+		common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
+	}
+}
+
+func GetAPIVersion(c *gin.Context) string {
+	query := c.Request.URL.Query()
+	apiVersion := query.Get("api-version")
+	if apiVersion == "" {
+		apiVersion = c.GetString("api_version")
+	}
+	return apiVersion
+}
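Note (reviewer sketch): what the extended getFullRequestURL does for a Cloudflare AI Gateway base URL — the account tag and gateway name are invented. The /v1 (OpenAI) or /openai/deployments (Azure) prefix is dropped because the gateway path already encodes the provider:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		base := "https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/my-gateway/openai" // hypothetical
		fmt.Println(base + strings.TrimPrefix("/v1/chat/completions", "/v1"))
		// .../my-gateway/openai/chat/completions

		azure := "https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/my-gateway/azure-openai/my-resource" // hypothetical
		fmt.Println(azure + strings.TrimPrefix("/openai/deployments/gpt-35/chat/completions", "/openai/deployments"))
		// .../azure-openai/my-resource/gpt-35/chat/completions
	}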
@@ -1,20 +1,18 @@
 package controller

 import (
-	"encoding/json"
 	"fmt"
 	"net/http"
 	"one-api/common"
 	"strconv"
 	"strings"

-	"github.com/Laisky/errors/v2"
 	"github.com/gin-gonic/gin"
 )

 type Message struct {
 	Role    string  `json:"role"`
-	Content string  `json:"content"`
+	Content any     `json:"content"`
 	Name    *string `json:"name,omitempty"`
 }
@@ -56,6 +54,45 @@ type OpenaiVisionMessageContentImageUrl struct {
 	Detail VisionImageResolution `json:"detail,omitempty"`
 }

+type ImageURL struct {
+	Url    string `json:"url,omitempty"`
+	Detail string `json:"detail,omitempty"`
+}
+
+type TextContent struct {
+	Type string `json:"type,omitempty"`
+	Text string `json:"text,omitempty"`
+}
+
+type ImageContent struct {
+	Type     string    `json:"type,omitempty"`
+	ImageURL *ImageURL `json:"image_url,omitempty"`
+}
+
+func (m Message) StringContent() string {
+	content, ok := m.Content.(string)
+	if ok {
+		return content
+	}
+	contentList, ok := m.Content.([]any)
+	if ok {
+		var contentStr string
+		for _, contentItem := range contentList {
+			contentMap, ok := contentItem.(map[string]any)
+			if !ok {
+				continue
+			}
+			if contentMap["type"] == "text" {
+				if subStr, ok := contentMap["text"].(string); ok {
+					contentStr += subStr
+				}
+			}
+		}
+		return contentStr
+	}
+	return ""
+}
+
 const (
 	RelayModeUnknown = iota
 	RelayModeChatCompletions
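Note (reviewer sketch): usage of the new StringContent accessor. With Content now typed any, a plain string passes through unchanged, while an OpenAI vision-style content array — as json.Unmarshal produces it, []any of map[string]any — is flattened to the concatenation of its text parts:

	package main

	import "fmt"

	// assumes the Message type and StringContent method from the diff above are in scope
	func main() {
		plain := Message{Role: "user", Content: "hello"}
		fmt.Println(plain.StringContent()) // hello

		vision := Message{Role: "user", Content: []any{
			map[string]any{"type": "text", "text": "describe "},
			map[string]any{"type": "image_url", "image_url": map[string]any{"url": "https://example.com/a.png"}},
			map[string]any{"type": "text", "text": "this image"},
		}}
		fmt.Println(vision.StringContent()) // describe this image
	}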
@@ -64,63 +101,75 @@ const (
 	RelayModeModerations
 	RelayModeImagesGenerations
 	RelayModeEdits
-	RelayModeAudio
+	RelayModeAudioSpeech
+	RelayModeAudioTranscription
+	RelayModeAudioTranslation
 )

 // https://platform.openai.com/docs/api-reference/chat

-type GeneralOpenAIRequest struct {
-	Model string `json:"model,omitempty"`
-	// Messages maybe []Message or []VisionMessage
-	Messages    any     `json:"messages,omitempty"`
-	Prompt      any     `json:"prompt,omitempty"`
-	Stream      bool    `json:"stream,omitempty"`
-	MaxTokens   int     `json:"max_tokens,omitempty"`
-	Temperature float64 `json:"temperature,omitempty"`
-	TopP        float64 `json:"top_p,omitempty"`
-	N           int     `json:"n,omitempty"`
-	Input       any     `json:"input,omitempty"`
-	Instruction string  `json:"instruction,omitempty"`
-	Size        string  `json:"size,omitempty"`
-	Functions   any     `json:"functions,omitempty"`
+type ResponseFormat struct {
+	Type string `json:"type,omitempty"`
 }

-func (r *GeneralOpenAIRequest) MessagesLen() int {
-	switch msgs := r.Messages.(type) {
-	case []any:
-		return len(msgs)
-	case []Message:
-		return len(msgs)
-	case []VisionMessage:
-		return len(msgs)
-	case []map[string]any:
-		return len(msgs)
-	default:
-		return 0
-	}
-}
+type GeneralOpenAIRequest struct {
+	Model            string          `json:"model,omitempty"`
+	Messages         []Message       `json:"messages,omitempty"`
+	Prompt           any             `json:"prompt,omitempty"`
+	Stream           bool            `json:"stream,omitempty"`
+	MaxTokens        int             `json:"max_tokens,omitempty"`
+	Temperature      float64         `json:"temperature,omitempty"`
+	TopP             float64         `json:"top_p,omitempty"`
+	N                int             `json:"n,omitempty"`
+	Input            any             `json:"input,omitempty"`
+	Instruction      string          `json:"instruction,omitempty"`
+	Size             string          `json:"size,omitempty"`
+	Functions        any             `json:"functions,omitempty"`
+	FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
+	PresencePenalty  float64         `json:"presence_penalty,omitempty"`
+	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
+	Seed             float64         `json:"seed,omitempty"`
+	Tools            any             `json:"tools,omitempty"`
+	ToolChoice       any             `json:"tool_choice,omitempty"`
+	User             string          `json:"user,omitempty"`
+}
+
+// func (r *GeneralOpenAIRequest) MessagesLen() int {
+// 	switch msgs := r.Messages.(type) {
+// 	case []any:
+// 		return len(msgs)
+// 	case []Message:
+// 		return len(msgs)
+// 	case []VisionMessage:
+// 		return len(msgs)
+// 	case []map[string]any:
+// 		return len(msgs)
+// 	default:
+// 		return 0
+// 	}
+// }

 // TextMessages returns messages as []Message
-func (r *GeneralOpenAIRequest) TextMessages() (messages []Message, err error) {
-	if blob, err := json.Marshal(r.Messages); err != nil {
-		return nil, errors.Wrap(err, "marshal messages failed")
-	} else if err := json.Unmarshal(blob, &messages); err != nil {
-		return nil, errors.Wrapf(err, "unmarshal messages failed %q", string(blob))
-	} else {
-		return messages, nil
-	}
-}
+// func (r *GeneralOpenAIRequest) TextMessages() (messages []Message, err error) {
+// 	if blob, err := json.Marshal(r.Messages); err != nil {
+// 		return nil, errors.Wrap(err, "marshal messages failed")
+// 	} else if err := json.Unmarshal(blob, &messages); err != nil {
+// 		return nil, errors.Wrapf(err, "unmarshal messages failed %q", string(blob))
+// 	} else {
+// 		return messages, nil
+// 	}
+// }

 // VisionMessages returns messages as []VisionMessage
-func (r *GeneralOpenAIRequest) VisionMessages() (messages []VisionMessage, err error) {
-	if blob, err := json.Marshal(r.Messages); err != nil {
-		return nil, errors.Wrap(err, "marshal vision messages failed")
-	} else if err := json.Unmarshal(blob, &messages); err != nil {
-		return nil, errors.Wrapf(err, "unmarshal vision messages failed %q", string(blob))
-	} else {
-		return messages, nil
-	}
-}
+// func (r *GeneralOpenAIRequest) VisionMessages() (messages []VisionMessage, err error) {
+// 	if blob, err := json.Marshal(r.Messages); err != nil {
+// 		return nil, errors.Wrap(err, "marshal vision messages failed")
+// 	} else if err := json.Unmarshal(blob, &messages); err != nil {
+// 		return nil, errors.Wrapf(err, "unmarshal vision messages failed %q", string(blob))
+// 	} else {
+// 		return messages, nil
+// 	}
+// }

 func (r GeneralOpenAIRequest) ParseInput() []string {
 	if r.Input == nil {
@@ -155,16 +204,51 @@ type TextRequest struct {
 	//Stream bool `json:"stream"`
 }

+// ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
 type ImageRequest struct {
-	Prompt string `json:"prompt"`
-	N      int    `json:"n"`
-	Size   string `json:"size"`
 	Model  string `json:"model"`
+	Prompt         string `json:"prompt" binding:"required"`
+	N              int    `json:"n,omitempty"`
+	Size           string `json:"size,omitempty"`
+	Quality        string `json:"quality,omitempty"`
+	ResponseFormat string `json:"response_format,omitempty"`
+	Style          string `json:"style,omitempty"`
+	User           string `json:"user,omitempty"`
 }

-type AudioResponse struct {
+type WhisperJSONResponse struct {
 	Text string `json:"text,omitempty"`
 }

+type WhisperVerboseJSONResponse struct {
+	Task     string    `json:"task,omitempty"`
+	Language string    `json:"language,omitempty"`
+	Duration float64   `json:"duration,omitempty"`
+	Text     string    `json:"text,omitempty"`
+	Segments []Segment `json:"segments,omitempty"`
+}
+
+type Segment struct {
+	Id               int     `json:"id"`
+	Seek             int     `json:"seek"`
+	Start            float64 `json:"start"`
+	End              float64 `json:"end"`
+	Text             string  `json:"text"`
+	Tokens           []int   `json:"tokens"`
+	Temperature      float64 `json:"temperature"`
+	AvgLogprob       float64 `json:"avg_logprob"`
+	CompressionRatio float64 `json:"compression_ratio"`
+	NoSpeechProb     float64 `json:"no_speech_prob"`
+}
+
+type TextToSpeechRequest struct {
+	Model          string  `json:"model" binding:"required"`
+	Input          string  `json:"input" binding:"required"`
+	Voice          string  `json:"voice" binding:"required"`
+	Speed          float64 `json:"speed"`
+	ResponseFormat string  `json:"response_format"`
+}
+
 type Usage struct {
 	PromptTokens     int `json:"prompt_tokens"`
 	CompletionTokens int `json:"completion_tokens"`
@@ -261,14 +345,22 @@ func Relay(c *gin.Context) {
 		relayMode = RelayModeImagesGenerations
 	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
 		relayMode = RelayModeEdits
-	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
-		relayMode = RelayModeAudio
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
+		relayMode = RelayModeAudioSpeech
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
+		relayMode = RelayModeAudioTranscription
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
+		relayMode = RelayModeAudioTranslation
 	}
 	var err *OpenAIErrorWithStatusCode
 	switch relayMode {
 	case RelayModeImagesGenerations:
 		err = relayImageHelper(c, relayMode)
-	case RelayModeAudio:
+	case RelayModeAudioSpeech:
+		fallthrough
+	case RelayModeAudioTranslation:
+		fallthrough
+	case RelayModeAudioTranscription:
 		err = relayAudioHelper(c, relayMode)
 	default:
 		err = relayTextHelper(c, relayMode)
@@ -2,7 +2,6 @@ package controller

 import (
 	"encoding/json"
-	"fmt"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -11,13 +10,13 @@ import (
 func TestGeneralOpenAIRequest_TextMessages(t *testing.T) {
 	tests := []struct {
 		name     string
-		messages interface{}
+		messages []Message
 		want     []Message
 		wantErr  error
 	}{
 		{
 			name:     "Test with []any messages",
-			messages: []any{Message{}, Message{}},
+			messages: []Message{Message{}, Message{}},
 			want:     []Message{{}, {}},
 			wantErr:  nil,
 		},
@@ -27,12 +26,6 @@ func TestGeneralOpenAIRequest_TextMessages(t *testing.T) {
 			want:    []Message{{}, {}},
 			wantErr: nil,
 		},
-		{
-			name:     "Test with invalid message type",
-			messages: "invalid",
-			want:     nil,
-			wantErr:  fmt.Errorf("invalid message type string"),
-		},
 	}

 	for _, tt := range tests {
@@ -47,15 +40,7 @@ func TestGeneralOpenAIRequest_TextMessages(t *testing.T) {
 			err = json.Unmarshal(blob, got)
 			require.NoError(t, err)

-			gotMessages, err := got.TextMessages()
-			if tt.wantErr != nil {
-				require.ErrorContains(t, err, "cannot unmarshal string into Go value")
-				return
-			} else {
-				require.NoError(t, err)
-			}
-
-			require.Equal(t, tt.want, gotMessages)
+			require.Equal(t, tt.want, got.Messages)
 		})
 	}
 }