package controller

import (
	"bytes"
	"errors"
	"fmt"
	"github.com/gin-gonic/gin"
	"io"
	"log"
	"net/http"
	"one-api/common"
	"one-api/dto"
	"one-api/middleware"
	"one-api/model"
	"one-api/relay"
	relayconstant "one-api/relay/constant"
	"one-api/service"
	"strings"
)

// relayHandler dispatches a request to the relay helper that matches its relay mode.
func relayHandler(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
	var err *dto.OpenAIErrorWithStatusCode
	switch relayMode {
	case relayconstant.RelayModeImagesGenerations:
		err = relay.ImageHelper(c, relayMode)
	case relayconstant.RelayModeAudioSpeech,
		relayconstant.RelayModeAudioTranslation,
		relayconstant.RelayModeAudioTranscription:
		err = relay.AudioHelper(c)
	case relayconstant.RelayModeRerank:
		err = relay.RerankHelper(c, relayMode)
	default:
		err = relay.TextHelper(c)
	}
	return err
}

// Playground handles requests from the web playground: it validates the request,
// selects a channel for the requested model and group, then delegates to Relay.
func Playground(c *gin.Context) {
	var openaiErr *dto.OpenAIErrorWithStatusCode

	defer func() {
		if openaiErr != nil {
			c.JSON(openaiErr.StatusCode, gin.H{
				"error": openaiErr.Error,
			})
		}
	}()

	playgroundRequest := &dto.PlayGroundRequest{}
	err := common.UnmarshalBodyReusable(c, playgroundRequest)
	if err != nil {
		openaiErr = service.OpenAIErrorWrapperLocal(err, "unmarshal_request_failed", http.StatusBadRequest)
		return
	}

	if playgroundRequest.Model == "" {
		openaiErr = service.OpenAIErrorWrapperLocal(errors.New("请选择模型"), "model_required", http.StatusBadRequest)
		return
	}
	c.Set("original_model", playgroundRequest.Model)

	group := playgroundRequest.Group
	if group == "" {
		group = c.GetString("group")
	} else {
		c.Set("group", group)
	}
	log.Printf("group: %s", group)
	log.Printf("model: %s", playgroundRequest.Model)

	channel, err := model.CacheGetRandomSatisfiedChannel(group, playgroundRequest.Model, 0)
	if err != nil {
		openaiErr = service.OpenAIErrorWrapperLocal(err, "get_playground_channel_failed", http.StatusInternalServerError)
		return
	}
	middleware.SetupContextForSelectedChannel(c, channel, playgroundRequest.Model)

	Relay(c)
}

// Relay is the entry point for OpenAI-compatible requests. It retries failed
// requests on other satisfied channels up to common.RetryTimes times.
func Relay(c *gin.Context) {
	relayMode := relayconstant.Path2RelayMode(c.Request.URL.Path)
	requestId := c.GetString(common.RequestIdKey)
	group := c.GetString("group")
	originalModel := c.GetString("original_model")
	var openaiErr *dto.OpenAIErrorWithStatusCode

	for i := 0; i <= common.RetryTimes; i++ {
		channel, err := getChannel(c, group, originalModel, i)
		if err != nil {
			common.LogError(c, err.Error())
			openaiErr = service.OpenAIErrorWrapperLocal(err, "get_channel_failed", http.StatusInternalServerError)
			break
		}

		openaiErr = relayRequest(c, relayMode, channel)
		if openaiErr == nil {
			return // request handled successfully
		}

		go processChannelError(c, channel.Id, channel.Type, channel.Name, channel.GetAutoBan(), openaiErr)

		if !shouldRetry(c, openaiErr, common.RetryTimes-i) {
			break
		}
	}

	useChannel := c.GetStringSlice("use_channel")
	if len(useChannel) > 1 {
		retryLogStr := fmt.Sprintf("重试:%s", strings.Trim(strings.Join(strings.Fields(fmt.Sprint(useChannel)), "->"), "[]"))
		common.LogInfo(c, retryLogStr)
	}

	if openaiErr != nil {
		if openaiErr.StatusCode == http.StatusTooManyRequests {
			openaiErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
		}
		openaiErr.Error.Message = common.MessageWithRequestId(openaiErr.Error.Message, requestId)
		c.JSON(openaiErr.StatusCode, gin.H{
			"error": openaiErr.Error,
		})
	}
}

func relayRequest(c *gin.Context, relayMode int, channel *model.Channel) *dto.OpenAIErrorWithStatusCode {
	addUsedChannel(c, channel.Id)
	requestBody, _ := common.GetRequestBody(c)
	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
	return relayHandler(c, relayMode)
}
func addUsedChannel(c *gin.Context, channelId int) {
	useChannel := c.GetStringSlice("use_channel")
	useChannel = append(useChannel, fmt.Sprintf("%d", channelId))
	c.Set("use_channel", useChannel)
}

// getChannel returns the channel for the current attempt: the first attempt reuses the
// channel already selected by the distribution middleware; retries pick another
// satisfied channel from the cache.
func getChannel(c *gin.Context, group, originalModel string, retryCount int) (*model.Channel, error) {
	if retryCount == 0 {
		autoBan := c.GetBool("auto_ban")
		autoBanInt := 1
		if !autoBan {
			autoBanInt = 0
		}
		return &model.Channel{
			Id:      c.GetInt("channel_id"),
			Type:    c.GetInt("channel_type"),
			Name:    c.GetString("channel_name"),
			AutoBan: &autoBanInt,
		}, nil
	}
	channel, err := model.CacheGetRandomSatisfiedChannel(group, originalModel, retryCount)
	if err != nil {
		return nil, fmt.Errorf("获取重试渠道失败: %s", err.Error())
	}
	middleware.SetupContextForSelectedChannel(c, channel, originalModel)
	return channel, nil
}

// shouldRetry reports whether a failed request should be retried on another channel.
func shouldRetry(c *gin.Context, openaiErr *dto.OpenAIErrorWithStatusCode, retryTimes int) bool {
	if openaiErr == nil {
		return false
	}
	if openaiErr.LocalError {
		return false
	}
	if retryTimes <= 0 {
		return false
	}
	if _, ok := c.Get("specific_channel_id"); ok {
		return false
	}
	if openaiErr.StatusCode == http.StatusTooManyRequests {
		return true
	}
	if openaiErr.StatusCode == 307 {
		return true
	}
	if openaiErr.StatusCode/100 == 5 {
		// do not retry on upstream timeouts
		if openaiErr.StatusCode == 504 || openaiErr.StatusCode == 524 {
			return false
		}
		return true
	}
	if openaiErr.StatusCode == http.StatusBadRequest {
		channelType := c.GetInt("channel_type")
		if channelType == common.ChannelTypeAnthropic {
			return true
		}
		return false
	}
	if openaiErr.StatusCode == 408 {
		// Azure request timeout, do not retry
		return false
	}
	if openaiErr.StatusCode/100 == 2 {
		return false
	}
	return true
}

func processChannelError(c *gin.Context, channelId int, channelType int, channelName string, autoBan bool, err *dto.OpenAIErrorWithStatusCode) {
	// do not use context to get channel info, there may be inconsistent channel info when processing asynchronously
	common.LogError(c, fmt.Sprintf("relay error (channel #%d, status code: %d): %s", channelId, err.StatusCode, err.Error.Message))
	if service.ShouldDisableChannel(channelType, err) && autoBan {
		service.DisableChannel(channelId, channelName, err.Error.Message)
	}
}

// RelayMidjourney dispatches Midjourney requests to the matching relay helper and
// converts relay errors into an upstream_error response.
func RelayMidjourney(c *gin.Context) {
	relayMode := c.GetInt("relay_mode")
	var err *dto.MidjourneyResponse
	switch relayMode {
	case relayconstant.RelayModeMidjourneyNotify:
		err = relay.RelayMidjourneyNotify(c)
	case relayconstant.RelayModeMidjourneyTaskFetch, relayconstant.RelayModeMidjourneyTaskFetchByCondition:
		err = relay.RelayMidjourneyTask(c, relayMode)
	case relayconstant.RelayModeMidjourneyTaskImageSeed:
		err = relay.RelayMidjourneyTaskImageSeed(c)
	case relayconstant.RelayModeSwapFace:
		err = relay.RelaySwapFace(c)
	default:
		err = relay.RelayMidjourneySubmit(c, relayMode)
	}
	log.Println(err)
	if err != nil {
		statusCode := http.StatusBadRequest
		if err.Code == 30 {
			err.Result = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"
			statusCode = http.StatusTooManyRequests
		}
		c.JSON(statusCode, gin.H{
			"description": fmt.Sprintf("%s %s", err.Description, err.Result),
			"type":        "upstream_error",
			"code":        err.Code,
		})
		channelId := c.GetInt("channel_id")
		common.LogError(c, fmt.Sprintf("relay error (channel #%d, status code %d): %s", channelId, statusCode, fmt.Sprintf("%s %s", err.Description, err.Result)))
	}
}

func RelayNotImplemented(c *gin.Context) {
	err := dto.OpenAIError{
		Message: "API not implemented",
		Type:    "new_api_error",
		Param:   "",
		Code:    "api_not_implemented",
	}
	c.JSON(http.StatusNotImplemented, gin.H{
		"error": err,
	})
}
func RelayNotFound(c *gin.Context) {
	err := dto.OpenAIError{
		Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
		Type:    "invalid_request_error",
		Param:   "",
		Code:    "",
	}
	c.JSON(http.StatusNotFound, gin.H{
		"error": err,
	})
}

// RelayTask is the entry point for asynchronous task requests (e.g. Suno). It retries
// failed submissions on other satisfied channels up to common.RetryTimes times.
func RelayTask(c *gin.Context) {
	retryTimes := common.RetryTimes
	channelId := c.GetInt("channel_id")
	relayMode := c.GetInt("relay_mode")
	group := c.GetString("group")
	originalModel := c.GetString("original_model")
	c.Set("use_channel", []string{fmt.Sprintf("%d", channelId)})
	taskErr := taskRelayHandler(c, relayMode)
	if taskErr == nil {
		retryTimes = 0
	}
	for i := 0; shouldRetryTaskRelay(c, channelId, taskErr, retryTimes) && i < retryTimes; i++ {
		channel, err := model.CacheGetRandomSatisfiedChannel(group, originalModel, i)
		if err != nil {
			common.LogError(c, fmt.Sprintf("CacheGetRandomSatisfiedChannel failed: %s", err.Error()))
			break
		}
		channelId = channel.Id
		useChannel := c.GetStringSlice("use_channel")
		useChannel = append(useChannel, fmt.Sprintf("%d", channelId))
		c.Set("use_channel", useChannel)
		common.LogInfo(c, fmt.Sprintf("using channel #%d to retry (remain times %d)", channel.Id, i))

		middleware.SetupContextForSelectedChannel(c, channel, originalModel)
		requestBody, _ := common.GetRequestBody(c)
		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
		taskErr = taskRelayHandler(c, relayMode)
	}
	useChannel := c.GetStringSlice("use_channel")
	if len(useChannel) > 1 {
		retryLogStr := fmt.Sprintf("重试:%s", strings.Trim(strings.Join(strings.Fields(fmt.Sprint(useChannel)), "->"), "[]"))
		common.LogInfo(c, retryLogStr)
	}
	if taskErr != nil {
		if taskErr.StatusCode == http.StatusTooManyRequests {
			taskErr.Message = "当前分组上游负载已饱和,请稍后再试"
		}
		c.JSON(taskErr.StatusCode, taskErr)
	}
}

func taskRelayHandler(c *gin.Context, relayMode int) *dto.TaskError {
	var err *dto.TaskError
	switch relayMode {
	case relayconstant.RelayModeSunoFetch, relayconstant.RelayModeSunoFetchByID:
		err = relay.RelayTaskFetch(c, relayMode)
	default:
		err = relay.RelayTaskSubmit(c, relayMode)
	}
	return err
}

// shouldRetryTaskRelay reports whether a failed task request should be retried on another channel.
func shouldRetryTaskRelay(c *gin.Context, channelId int, taskErr *dto.TaskError, retryTimes int) bool {
	if taskErr == nil {
		return false
	}
	if retryTimes <= 0 {
		return false
	}
	if _, ok := c.Get("specific_channel_id"); ok {
		return false
	}
	if taskErr.StatusCode == http.StatusTooManyRequests {
		return true
	}
	if taskErr.StatusCode == 307 {
		return true
	}
	if taskErr.StatusCode/100 == 5 {
		// do not retry on upstream timeouts
		if taskErr.StatusCode == 504 || taskErr.StatusCode == 524 {
			return false
		}
		return true
	}
	if taskErr.StatusCode == http.StatusBadRequest {
		return false
	}
	if taskErr.StatusCode == 408 {
		// Azure request timeout, do not retry
		return false
	}
	if taskErr.LocalError {
		return false
	}
	if taskErr.StatusCode/100 == 2 {
		return false
	}
	return true
}