Mirror of https://github.com/songquanpeng/one-api.git, synced 2025-10-31 22:03:41 +08:00

Compare commits: 7 commits, v0.4.10-al...v0.4.10-al

| SHA1 |
|---|
| 3c940113ab |
| 0495b9a0d7 |
| 12a0e7105e |
| e628b643cd |
| 675847bf98 |
| 2ff15baf66 |
| 4139a7036f |

@@ -60,6 +60,7 @@ _✨ All-in-one OpenAI interface: integrates various API access methods, ready to use out of the box ✨_
 ## Features
 1. Support for multiple API access channels:
    + [x] OpenAI official channel (supports configuring a mirror)
+   + [x] [Anthropic Claude series models](https://anthropic.com)
    + [x] **Azure OpenAI API**
    + [x] [API Distribute](https://api.gptjk.top/register?aff=QGxj)
    + [x] [OpenAI-SB](https://openai-sb.com)

@@ -151,6 +151,7 @@ const (
 	ChannelTypePaLM      = 11
 	ChannelTypeAPI2GPT   = 12
 	ChannelTypeAIGC2D    = 13
+	ChannelTypeAnthropic = 14
 )
 
 var ChannelBaseURLs = []string{
@@ -168,4 +169,5 @@ var ChannelBaseURLs = []string{
 	"",                              // 11
 	"https://api.api2gpt.com",       // 12
 	"https://api.aigc2d.com",        // 13
+	"https://api.anthropic.com",     // 14
 }

@@ -36,6 +36,8 @@ var ModelRatio = map[string]float64{
 	"text-moderation-stable":  0.1,
 	"text-moderation-latest":  0.1,
 	"dall-e":                  8,
+	"claude-instant-1":        0.75,
+	"claude-2":                30,
 }
 
 func ModelRatio2JSONString() string {

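These ratios are relative price multipliers, not dollar amounts; by one-api's documented convention, a ratio of 1 corresponds to $0.002 per 1K tokens. A minimal sketch of the implied conversion; the helper name and constant below are illustrative, not code from this diff:

```go
package main

import "fmt"

// Assumption: one-api's pricing convention, ratio 1 == $0.002 per 1K tokens.
const usdPer1KTokensAtRatio1 = 0.002

// estimateCostUSD is a hypothetical helper showing how a ModelRatio entry
// would translate token usage into dollars under that convention.
func estimateCostUSD(ratio float64, totalTokens int) float64 {
	return ratio * usdPer1KTokensAtRatio1 * float64(totalTokens) / 1000.0
}

func main() {
	fmt.Printf("claude-instant-1: $%.4f per 1K tokens\n", estimateCostUSD(0.75, 1000)) // $0.0015
	fmt.Printf("claude-2:         $%.4f per 1K tokens\n", estimateCostUSD(30, 1000))   // $0.0600
}
```

Under that assumption, the two new entries price claude-instant-1 at $0.0015 and claude-2 at $0.06 per 1K tokens.
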
@@ -7,16 +7,19 @@ import (
 )
 
 func GetSubscription(c *gin.Context) {
-	var quota int
+	var remainQuota int
+	var usedQuota int
 	var err error
 	var token *model.Token
 	if common.DisplayTokenStatEnabled {
 		tokenId := c.GetInt("token_id")
 		token, err = model.GetTokenById(tokenId)
-		quota = token.RemainQuota
+		remainQuota = token.RemainQuota
+		usedQuota = token.UsedQuota
 	} else {
 		userId := c.GetInt("id")
-		quota, err = model.GetUserQuota(userId)
+		remainQuota, err = model.GetUserQuota(userId)
+		usedQuota, err = model.GetUserUsedQuota(userId)
 	}
 	if err != nil {
 		openAIError := OpenAIError{
@@ -28,6 +31,7 @@ func GetSubscription(c *gin.Context) {
 		})
 		return
 	}
+	quota := remainQuota + usedQuota
 	amount := float64(quota)
 	if common.DisplayInCurrencyEnabled {
 		amount /= common.QuotaPerUnit

@@ -14,7 +14,7 @@ import (
 	"time"
 )
 
-func testChannel(channel *model.Channel, request ChatRequest) error {
+func testChannel(channel *model.Channel, request ChatRequest) (error, *OpenAIError) {
 	switch channel.Type {
 	case common.ChannelTypeAzure:
 		request.Model = "gpt-35-turbo"
@@ -33,11 +33,11 @@ func testChannel(channel *model.Channel, request ChatRequest) error {
 
 	jsonData, err := json.Marshal(request)
 	if err != nil {
-		return err
+		return err, nil
 	}
 	req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(jsonData))
 	if err != nil {
-		return err
+		return err, nil
 	}
 	if channel.Type == common.ChannelTypeAzure {
 		req.Header.Set("api-key", channel.Key)
@@ -48,18 +48,18 @@ func testChannel(channel *model.Channel, request ChatRequest) error {
 	client := &http.Client{}
 	resp, err := client.Do(req)
 	if err != nil {
-		return err
+		return err, nil
 	}
 	defer resp.Body.Close()
 	var response TextResponse
 	err = json.NewDecoder(resp.Body).Decode(&response)
 	if err != nil {
-		return err
+		return err, nil
 	}
 	if response.Usage.CompletionTokens == 0 {
-		return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message))
+		return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message)), &response.Error
 	}
-	return nil
+	return nil, nil
 }
 
 func buildTestRequest() *ChatRequest {
@@ -94,7 +94,7 @@ func TestChannel(c *gin.Context) {
 	}
 	testRequest := buildTestRequest()
 	tik := time.Now()
-	err = testChannel(channel, *testRequest)
+	err, _ = testChannel(channel, *testRequest)
 	tok := time.Now()
 	milliseconds := tok.Sub(tik).Milliseconds()
 	go channel.UpdateResponseTime(milliseconds)
@@ -158,13 +158,14 @@ func testAllChannels(notify bool) error {
 				continue
 			}
 			tik := time.Now()
-			err := testChannel(channel, *testRequest)
+			err, openaiErr := testChannel(channel, *testRequest)
 			tok := time.Now()
 			milliseconds := tok.Sub(tik).Milliseconds()
-			if err != nil || milliseconds > disableThreshold {
+			if milliseconds > disableThreshold {
 				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
 				disableChannel(channel.Id, channel.Name, err.Error())
 			}
+			if shouldDisableChannel(openaiErr) {
+				disableChannel(channel.Id, channel.Name, err.Error())
+			}
 			channel.UpdateResponseTime(milliseconds)

@@ -270,6 +270,24 @@ func init() {
 			Root:       "ChatGLM2",
 			Parent:     nil,
 		},
+		{
+			Id:         "claude-instant-1",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "anthropic",
+			Permission: permission,
+			Root:       "claude-instant-1",
+			Parent:     nil,
+		},
+		{
+			Id:         "claude-2",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "anthropic",
+			Permission: permission,
+			Root:       "claude-2",
+			Parent:     nil,
+		},
 	}
 	openAIModelsMap = make(map[string]OpenAIModels)
 	for _, model := range openAIModels {

							
								
								
									
controller/relay-claude.go (new file, 221 lines)
@@ -0,0 +1,221 @@
package controller

import (
	"bufio"
	"encoding/json"
	"fmt"
	"github.com/gin-gonic/gin"
	"io"
	"net/http"
	"one-api/common"
	"strings"
)

type ClaudeMetadata struct {
	UserId string `json:"user_id"`
}

type ClaudeRequest struct {
	Model             string   `json:"model"`
	Prompt            string   `json:"prompt"`
	MaxTokensToSample int      `json:"max_tokens_to_sample"`
	StopSequences     []string `json:"stop_sequences,omitempty"`
	Temperature       float64  `json:"temperature,omitempty"`
	TopP              float64  `json:"top_p,omitempty"`
	TopK              int      `json:"top_k,omitempty"`
	//ClaudeMetadata    `json:"metadata,omitempty"`
	Stream bool `json:"stream,omitempty"`
}

type ClaudeError struct {
	Type    string `json:"type"`
	Message string `json:"message"`
}

type ClaudeResponse struct {
	Completion string      `json:"completion"`
	StopReason string      `json:"stop_reason"`
	Model      string      `json:"model"`
	Error      ClaudeError `json:"error"`
}

func stopReasonClaude2OpenAI(reason string) string {
	switch reason {
	case "stop_sequence":
		return "stop"
	case "max_tokens":
		return "length"
	default:
		return reason
	}
}

func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest {
	claudeRequest := ClaudeRequest{
		Model:             textRequest.Model,
		Prompt:            "",
		MaxTokensToSample: textRequest.MaxTokens,
		StopSequences:     nil,
		Temperature:       textRequest.Temperature,
		TopP:              textRequest.TopP,
		Stream:            textRequest.Stream,
	}
	if claudeRequest.MaxTokensToSample == 0 {
		claudeRequest.MaxTokensToSample = 1000000
	}
	prompt := ""
	for _, message := range textRequest.Messages {
		if message.Role == "user" {
			prompt += fmt.Sprintf("\n\nHuman: %s", message.Content)
		} else if message.Role == "assistant" {
			prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
		} else {
			// ignore other roles
		}
	}
	// append the trailing "Assistant:" turn once, after all messages,
	// so the prompt invites exactly one completion
	prompt += "\n\nAssistant:"
	claudeRequest.Prompt = prompt
	return &claudeRequest
}
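
To make the mapping concrete, here is a self-contained sketch of the same message-to-prompt conversion; the local `message` type is a simplified stand-in for one-api's `Message`, which is defined elsewhere in the repo:

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-in for one-api's Message type, for illustration only.
type message struct {
	Role    string
	Content string
}

// messagesToClaudePrompt mirrors requestOpenAI2Claude's prompt building:
// user turns become "Human:", assistant turns become "Assistant:", other
// roles are dropped, and a trailing "Assistant:" invites the completion.
func messagesToClaudePrompt(messages []message) string {
	var sb strings.Builder
	for _, m := range messages {
		switch m.Role {
		case "user":
			sb.WriteString(fmt.Sprintf("\n\nHuman: %s", m.Content))
		case "assistant":
			sb.WriteString(fmt.Sprintf("\n\nAssistant: %s", m.Content))
		}
	}
	sb.WriteString("\n\nAssistant:")
	return sb.String()
}

func main() {
	prompt := messagesToClaudePrompt([]message{
		{Role: "user", Content: "Hi"},
		{Role: "assistant", Content: "Hello! How can I help?"},
		{Role: "user", Content: "Name a Go web framework."},
	})
	fmt.Println(prompt)
	// Prints:
	//
	// Human: Hi
	//
	// Assistant: Hello! How can I help?
	//
	// Human: Name a Go web framework.
	//
	// Assistant:
}
```
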

func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *ChatCompletionsStreamResponse {
	var choice ChatCompletionsStreamResponseChoice
	choice.Delta.Content = claudeResponse.Completion
	choice.FinishReason = stopReasonClaude2OpenAI(claudeResponse.StopReason)
	var response ChatCompletionsStreamResponse
	response.Object = "chat.completion.chunk"
	response.Model = claudeResponse.Model
	response.Choices = []ChatCompletionsStreamResponseChoice{choice}
	return &response
}

func responseClaude2OpenAI(claudeResponse *ClaudeResponse) *OpenAITextResponse {
	choice := OpenAITextResponseChoice{
		Index: 0,
		Message: Message{
			Role:    "assistant",
			Content: strings.TrimPrefix(claudeResponse.Completion, " "),
			Name:    nil,
		},
		FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
	}
	fullTextResponse := OpenAITextResponse{
		Id:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
		Object:  "chat.completion",
		Created: common.GetTimestamp(),
		Choices: []OpenAITextResponseChoice{choice},
	}
	return &fullTextResponse
}

func claudeStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
	responseText := ""
	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
	createdTime := common.GetTimestamp()
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 {
			return i + 4, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	dataChan := make(chan string)
	stopChan := make(chan bool)
	go func() {
		for scanner.Scan() {
			data := scanner.Text()
			if !strings.HasPrefix(data, "event: completion") {
				continue
			}
			data = strings.TrimPrefix(data, "event: completion\r\ndata: ")
			dataChan <- data
		}
		stopChan <- true
	}()
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
	c.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			// some implementations may add \r at the end of data
			data = strings.TrimSuffix(data, "\r")
			var claudeResponse ClaudeResponse
			err := json.Unmarshal([]byte(data), &claudeResponse)
			if err != nil {
				common.SysError("error unmarshalling stream response: " + err.Error())
				return true
			}
			responseText += claudeResponse.Completion
			response := streamResponseClaude2OpenAI(&claudeResponse)
			response.Id = responseId
			response.Created = createdTime
			jsonStr, err := json.Marshal(response)
			if err != nil {
				common.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)})
			return true
		case <-stopChan:
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}
	})
	err := resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
	}
	return nil, responseText
}
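
The custom split function above frames the stream on blank lines (`\r\n\r\n`) and the reader goroutine keeps only `event: completion` events, matching Anthropic's SSE layout for the 2023-06-01 API. A standalone sketch of that framing against a canned stream (the sample payload is illustrative, not captured traffic):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Illustrative sample of the wire format the handler expects.
	stream := "event: completion\r\ndata: {\"completion\": \" Hello\"}\r\n\r\n" +
		"event: ping\r\ndata: {}\r\n\r\n" +
		"event: completion\r\ndata: {\"completion\": \" world\"}\r\n\r\n"
	scanner := bufio.NewScanner(strings.NewReader(stream))
	// Same splitter as claudeStreamHandler: one token per \r\n\r\n-delimited event.
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 {
			return i + 4, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	for scanner.Scan() {
		event := scanner.Text()
		if !strings.HasPrefix(event, "event: completion") {
			continue // ping and other event types are skipped
		}
		payload := strings.TrimPrefix(event, "event: completion\r\ndata: ")
		fmt.Printf("JSON payload: %s\n", payload)
	}
}
```
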

func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	var claudeResponse ClaudeResponse
	err = json.Unmarshal(responseBody, &claudeResponse)
	if err != nil {
		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if claudeResponse.Error.Type != "" {
		return &OpenAIErrorWithStatusCode{
			OpenAIError: OpenAIError{
				Message: claudeResponse.Error.Message,
				Type:    claudeResponse.Error.Type,
				Param:   "",
				Code:    claudeResponse.Error.Type,
			},
			StatusCode: resp.StatusCode,
		}, nil
	}
	fullTextResponse := responseClaude2OpenAI(&claudeResponse)
	completionTokens := countTokenText(claudeResponse.Completion, model)
	usage := Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: completionTokens,
		TotalTokens:      promptTokens + completionTokens,
	}
	fullTextResponse.Usage = usage
	jsonResponse, err := json.Marshal(fullTextResponse)
	if err != nil {
		return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, err = c.Writer.Write(jsonResponse)
	return nil, &usage
}

							
								
								
									
controller/relay-openai.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package controller

import (
	"bufio"
	"bytes"
	"encoding/json"
	"github.com/gin-gonic/gin"
	"io"
	"net/http"
	"one-api/common"
	"strings"
)

func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*OpenAIErrorWithStatusCode, string) {
	responseText := ""
	scanner := bufio.NewScanner(resp.Body)
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	dataChan := make(chan string)
	stopChan := make(chan bool)
	go func() {
		for scanner.Scan() {
			data := scanner.Text()
			if len(data) < 6 { // ignore blank line or wrong format
				continue
			}
			dataChan <- data
			data = data[6:]
			if !strings.HasPrefix(data, "[DONE]") {
				switch relayMode {
				case RelayModeChatCompletions:
					var streamResponse ChatCompletionsStreamResponse
					err := json.Unmarshal([]byte(data), &streamResponse)
					if err != nil {
						common.SysError("error unmarshalling stream response: " + err.Error())
						return
					}
					for _, choice := range streamResponse.Choices {
						responseText += choice.Delta.Content
					}
				case RelayModeCompletions:
					var streamResponse CompletionsStreamResponse
					err := json.Unmarshal([]byte(data), &streamResponse)
					if err != nil {
						common.SysError("error unmarshalling stream response: " + err.Error())
						return
					}
					for _, choice := range streamResponse.Choices {
						responseText += choice.Text
					}
				}
			}
		}
		stopChan <- true
	}()
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
	c.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			if strings.HasPrefix(data, "data: [DONE]") {
				data = data[:12]
			}
			// some implementations may add \r at the end of data
			data = strings.TrimSuffix(data, "\r")
			c.Render(-1, common.CustomEvent{Data: data})
			return true
		case <-stopChan:
			return false
		}
	})
	err := resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
	}
	return nil, responseText
}

func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool) (*OpenAIErrorWithStatusCode, *Usage) {
	var textResponse TextResponse
	if consumeQuota {
		responseBody, err := io.ReadAll(resp.Body)
		if err != nil {
			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
		}
		err = resp.Body.Close()
		if err != nil {
			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
		}
		err = json.Unmarshal(responseBody, &textResponse)
		if err != nil {
			return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
		}
		if textResponse.Error.Type != "" {
			return &OpenAIErrorWithStatusCode{
				OpenAIError: textResponse.Error,
				StatusCode:  resp.StatusCode,
			}, nil
		}
		// Reset response body
		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
	}
	// We shouldn't set the header before we parse the response body, because the parse part may fail.
	// And then we will have to send an error response, but in this case, the header has already been set.
	// So the client will be confused by the response.
	// For example, Postman will report error, and we cannot check the response at all.
	for k, v := range resp.Header {
		c.Writer.Header().Set(k, v[0])
	}
	c.Writer.WriteHeader(resp.StatusCode)
	_, err := io.Copy(c.Writer, resp.Body)
	if err != nil {
		return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	return nil, &textResponse.Usage
}

@@ -1,7 +1,6 @@
 package controller
 
 import (
-	"bufio"
 	"bytes"
 	"encoding/json"
 	"errors"
@@ -15,6 +14,12 @@ import (
 	"github.com/gin-gonic/gin"
 )
 
+const (
+	APITypeOpenAI = iota
+	APITypeClaude
+	APITypePaLM
+)
+
 func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	channelType := c.GetInt("channel")
 	tokenId := c.GetInt("token_id")
@@ -71,12 +76,18 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			isModelMapped = true
 		}
 	}
+	apiType := APITypeOpenAI
+	if strings.HasPrefix(textRequest.Model, "claude") {
+		apiType = APITypeClaude
+	}
 	baseURL := common.ChannelBaseURLs[channelType]
 	requestURL := c.Request.URL.String()
 	if c.GetString("base_url") != "" {
 		baseURL = c.GetString("base_url")
 	}
 	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
+	switch apiType {
+	case APITypeOpenAI:
 		if channelType == common.ChannelTypeAzure {
 			// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
 			query := c.Request.URL.Query()
@@ -95,9 +106,12 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			model_ = strings.TrimSuffix(model_, "-0314")
 			model_ = strings.TrimSuffix(model_, "-0613")
 			fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
-	} else if channelType == common.ChannelTypePaLM {
-		err := relayPaLM(textRequest, c)
-		return err
 		}
+	case APITypeClaude:
+		fullRequestURL = "https://api.anthropic.com/v1/complete"
+		if baseURL != "" {
+			fullRequestURL = fmt.Sprintf("%s/v1/complete", baseURL)
+		}
 	}
 	var promptTokens int
 	var completionTokens int
@@ -142,17 +156,36 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	} else {
 		requestBody = c.Request.Body
 	}
+	switch apiType {
+	case APITypeClaude:
+		claudeRequest := requestOpenAI2Claude(textRequest)
+		jsonStr, err := json.Marshal(claudeRequest)
+		if err != nil {
+			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
+		}
+		requestBody = bytes.NewBuffer(jsonStr)
+	}
 	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
 	if err != nil {
 		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
 	}
+	apiKey := c.Request.Header.Get("Authorization")
+	apiKey = strings.TrimPrefix(apiKey, "Bearer ")
+	switch apiType {
+	case APITypeOpenAI:
 		if channelType == common.ChannelTypeAzure {
-		key := c.Request.Header.Get("Authorization")
-		key = strings.TrimPrefix(key, "Bearer ")
-		req.Header.Set("api-key", key)
+			req.Header.Set("api-key", apiKey)
 		} else {
 			req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
 		}
+	case APITypeClaude:
+		req.Header.Set("x-api-key", apiKey)
+		anthropicVersion := c.Request.Header.Get("anthropic-version")
+		if anthropicVersion == "" {
+			anthropicVersion = "2023-06-01"
+		}
+		req.Header.Set("anthropic-version", anthropicVersion)
+	}
 	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
 	req.Header.Set("Accept", c.Request.Header.Get("Accept"))
 	//req.Header.Set("Connection", c.Request.Header.Get("Connection"))
@@ -219,124 +252,40 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			}
 		}
 	}()
-	if isStream {
-		scanner := bufio.NewScanner(resp.Body)
-		scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
-			if atEOF && len(data) == 0 {
-				return 0, nil, nil
-			}
-			if i := strings.Index(string(data), "\n"); i >= 0 {
-				return i + 1, data[0:i], nil
-			}
-			if atEOF {
-				return len(data), data, nil
-			}
-			return 0, nil, nil
-		})
-		dataChan := make(chan string)
-		stopChan := make(chan bool)
-		go func() {
-			for scanner.Scan() {
-				data := scanner.Text()
-				if len(data) < 6 { // ignore blank line or wrong format
-					continue
-				}
-				dataChan <- data
-				data = data[6:]
-				if !strings.HasPrefix(data, "[DONE]") {
-					switch relayMode {
-					case RelayModeChatCompletions:
-						var streamResponse ChatCompletionsStreamResponse
-						err = json.Unmarshal([]byte(data), &streamResponse)
-						if err != nil {
-							common.SysError("error unmarshalling stream response: " + err.Error())
-							return
-						}
-						for _, choice := range streamResponse.Choices {
-							streamResponseText += choice.Delta.Content
-						}
-					case RelayModeCompletions:
-						var streamResponse CompletionsStreamResponse
-						err = json.Unmarshal([]byte(data), &streamResponse)
-						if err != nil {
-							common.SysError("error unmarshalling stream response: " + err.Error())
-							return
-						}
-						for _, choice := range streamResponse.Choices {
-							streamResponseText += choice.Text
-						}
-					}
-				}
-			}
-			stopChan <- true
-		}()
-		c.Writer.Header().Set("Content-Type", "text/event-stream")
-		c.Writer.Header().Set("Cache-Control", "no-cache")
-		c.Writer.Header().Set("Connection", "keep-alive")
-		c.Writer.Header().Set("Transfer-Encoding", "chunked")
-		c.Writer.Header().Set("X-Accel-Buffering", "no")
-		c.Stream(func(w io.Writer) bool {
-			select {
-			case data := <-dataChan:
-				if strings.HasPrefix(data, "data: [DONE]") {
-					data = data[:12]
-				}
-				// some implementations may add \r at the end of data
-				data = strings.TrimSuffix(data, "\r")
-				c.Render(-1, common.CustomEvent{Data: data})
-				return true
-			case <-stopChan:
-				return false
-			}
-		})
-		err = resp.Body.Close()
-		if err != nil {
-			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-		}
-		return nil
-	} else {
-		if consumeQuota {
-			responseBody, err := io.ReadAll(resp.Body)
-			if err != nil {
-				return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
-			}
-			err = resp.Body.Close()
-			if err != nil {
-				return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-			}
-			err = json.Unmarshal(responseBody, &textResponse)
-			if err != nil {
-				return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
-			}
-			if textResponse.Error.Type != "" {
-				return &OpenAIErrorWithStatusCode{
-					OpenAIError: textResponse.Error,
-					StatusCode:  resp.StatusCode,
-				}
-			}
-			// Reset response body
-			resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-		}
-		// We shouldn't set the header before we parse the response body, because the parse part may fail.
-		// And then we will have to send an error response, but in this case, the header has already been set.
-		// So the client will be confused by the response.
-		// For example, Postman will report error, and we cannot check the response at all.
-		for k, v := range resp.Header {
-			c.Writer.Header().Set(k, v[0])
-		}
-		c.Writer.WriteHeader(resp.StatusCode)
-		_, err = io.Copy(c.Writer, resp.Body)
-		if err != nil {
-			return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
-		}
-		err = resp.Body.Close()
-		if err != nil {
-			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-		}
-		return nil
-	}
+	switch apiType {
+	case APITypeOpenAI:
+		if isStream {
+			err, responseText := openaiStreamHandler(c, resp, relayMode)
+			if err != nil {
+				return err
+			}
+			streamResponseText = responseText
+			return nil
+		} else {
+			err, usage := openaiHandler(c, resp, consumeQuota)
+			if err != nil {
+				return err
+			}
+			textResponse.Usage = *usage
+			return nil
+		}
+	case APITypeClaude:
+		if isStream {
+			err, responseText := claudeStreamHandler(c, resp)
+			if err != nil {
+				return err
+			}
+			streamResponseText = responseText
+			return nil
+		} else {
+			err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
+			if err != nil {
+				return err
+			}
+			textResponse.Usage = *usage
+			return nil
+		}
+	default:
+		return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
+	}
 }

@@ -91,3 +91,16 @@ func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
 		StatusCode:  statusCode,
 	}
 }
+
+func shouldDisableChannel(err *OpenAIError) bool {
+	if !common.AutomaticDisableChannelEnabled {
+		return false
+	}
+	if err == nil {
+		return false
+	}
+	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
+		return true
+	}
+	return false
+}

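The checked types and codes come from OpenAI's documented error codes (the link is cited in the `Relay` hunk further down). A small standalone sketch of the decision, with a simplified local `OpenAIError` and the global `AutomaticDisableChannelEnabled` switch omitted:

```go
package main

import "fmt"

// Simplified stand-in for one-api's OpenAIError, for illustration only.
type OpenAIError struct {
	Type string
	Code string
}

// shouldDisable mirrors shouldDisableChannel's decision, minus the global switch.
func shouldDisable(err *OpenAIError) bool {
	if err == nil {
		return false
	}
	return err.Type == "insufficient_quota" ||
		err.Code == "invalid_api_key" ||
		err.Code == "account_deactivated"
}

func main() {
	fmt.Println(shouldDisable(&OpenAIError{Type: "insufficient_quota"})) // true
	fmt.Println(shouldDisable(&OpenAIError{Code: "invalid_api_key"}))    // true
	fmt.Println(shouldDisable(&OpenAIError{Type: "server_error"}))       // false: transient errors don't disable
	fmt.Println(shouldDisable(nil))                                      // false
}
```
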
@@ -85,6 +85,20 @@ type TextResponse struct {
 	Error OpenAIError `json:"error"`
 }
 
+type OpenAITextResponseChoice struct {
+	Index        int `json:"index"`
+	Message      `json:"message"`
+	FinishReason string `json:"finish_reason"`
+}
+
+type OpenAITextResponse struct {
+	Id      string                     `json:"id"`
+	Object  string                     `json:"object"`
+	Created int64                      `json:"created"`
+	Choices []OpenAITextResponseChoice `json:"choices"`
+	Usage   `json:"usage"`
+}
+
 type ImageResponse struct {
 	Created int `json:"created"`
 	Data    []struct {
@@ -92,13 +106,19 @@ type ImageResponse struct {
 	}
 }
 
-type ChatCompletionsStreamResponse struct {
-	Choices []struct {
-		Delta struct {
-			Content string `json:"content"`
-		} `json:"delta"`
-		FinishReason string `json:"finish_reason"`
-	} `json:"choices"`
-}
+type ChatCompletionsStreamResponseChoice struct {
+	Delta struct {
+		Content string `json:"content"`
+	} `json:"delta"`
+	FinishReason string `json:"finish_reason,omitempty"`
+}
+
+type ChatCompletionsStreamResponse struct {
+	Id      string                                `json:"id"`
+	Object  string                                `json:"object"`
+	Created int64                                 `json:"created"`
+	Model   string                                `json:"model"`
+	Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
+}
 
 type CompletionsStreamResponse struct {
@@ -151,7 +171,7 @@ func Relay(c *gin.Context) {
 		channelId := c.GetInt("channel_id")
 		common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
 		// https://platform.openai.com/docs/guides/error-codes/api-errors
-		if common.AutomaticDisableChannelEnabled && (err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated") {
+		if shouldDisableChannel(&err.OpenAIError) {
 			channelId := c.GetInt("channel_id")
 			channelName := c.GetString("channel_name")
 			disableChannel(channelId, channelName, err.Message)

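The restructured stream types now carry `Id`, `Object`, `Created`, and `Model`, which is what lets the Claude adapter emit OpenAI-compatible chunks. A quick check that a typical OpenAI-style chunk unmarshals into the new shape; the sample payload is illustrative, and the types are local copies of the ones defined in the diff above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the restructured stream types from the diff above.
type ChatCompletionsStreamResponseChoice struct {
	Delta struct {
		Content string `json:"content"`
	} `json:"delta"`
	FinishReason string `json:"finish_reason,omitempty"`
}

type ChatCompletionsStreamResponse struct {
	Id      string                                `json:"id"`
	Object  string                                `json:"object"`
	Created int64                                 `json:"created"`
	Model   string                                `json:"model"`
	Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
}

func main() {
	// Illustrative chunk in OpenAI's streaming format; a JSON null
	// finish_reason simply leaves the string field at its zero value.
	data := `{"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,` +
		`"model":"claude-2","choices":[{"delta":{"content":"Hello"},"finish_reason":null}]}`
	var chunk ChatCompletionsStreamResponse
	if err := json.Unmarshal([]byte(data), &chunk); err != nil {
		panic(err)
	}
	fmt.Println(chunk.Model, chunk.Choices[0].Delta.Content) // claude-2 Hello
}
```
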
@@ -1,5 +1,6 @@
 export const CHANNEL_OPTIONS = [
   { key: 1, text: 'OpenAI', value: 1, color: 'green' },
+  { key: 14, text: 'Anthropic', value: 14, color: 'black' },
   { key: 8, text: '自定义', value: 8, color: 'pink' },
   { key: 3, text: 'Azure', value: 3, color: 'olive' },
   { key: 2, text: 'API2D', value: 2, color: 'blue' },

@@ -27,6 +27,7 @@ const EditChannel = () => {
   };
   const [batch, setBatch] = useState(false);
   const [inputs, setInputs] = useState(originInputs);
+  const [originModelOptions, setOriginModelOptions] = useState([]);
   const [modelOptions, setModelOptions] = useState([]);
   const [groupOptions, setGroupOptions] = useState([]);
   const [basicModels, setBasicModels] = useState([]);
@@ -44,19 +45,6 @@ const EditChannel = () => {
         data.models = [];
       } else {
         data.models = data.models.split(',');
-        setTimeout(() => {
-          let localModelOptions = [...modelOptions];
-          data.models.forEach((model) => {
-            if (!localModelOptions.find((option) => option.key === model)) {
-              localModelOptions.push({
-                key: model,
-                text: model,
-                value: model
-              });
-            }
-          });
-          setModelOptions(localModelOptions);
-        }, 1000);
       }
       if (data.group === '') {
         data.groups = [];
@@ -76,13 +64,16 @@ const EditChannel = () => {
   const fetchModels = async () => {
     try {
       let res = await API.get(`/api/channel/models`);
-      setModelOptions(res.data.data.map((model) => ({
+      let localModelOptions = res.data.data.map((model) => ({
         key: model.id,
         text: model.id,
        value: model.id
-      })));
+      }));
+      setOriginModelOptions(localModelOptions);
       setFullModels(res.data.data.map((model) => model.id));
-      setBasicModels(res.data.data.filter((model) => !model.id.startsWith('gpt-4')).map((model) => model.id));
+      setBasicModels(res.data.data.filter((model) => {
+        return model.id.startsWith('gpt-3') || model.id.startsWith('text-');
+      }).map((model) => model.id));
     } catch (error) {
       showError(error.message);
     }
@@ -101,6 +92,20 @@ const EditChannel = () => {
     }
   };
 
+  useEffect(() => {
+    let localModelOptions = [...originModelOptions];
+    inputs.models.forEach((model) => {
+      if (!localModelOptions.find((option) => option.key === model)) {
+        localModelOptions.push({
+          key: model,
+          text: model,
+          value: model
+        });
+      }
+    });
+    setModelOptions(localModelOptions);
+  }, [originModelOptions, inputs.models]);
+
   useEffect(() => {
     if (isEdit) {
       loadChannel().then();
@@ -280,15 +285,20 @@ const EditChannel = () => {
             <Input
               action={
                 <Button type={'button'} onClick={()=>{
+                  if (customModel.trim() === "") return;
+                  if (inputs.models.includes(customModel)) return;
                   let localModels = [...inputs.models];
                   localModels.push(customModel);
-                  let localModelOptions = [...modelOptions];
+                  let localModelOptions = [];
                   localModelOptions.push({
                     key: customModel,
                     text: customModel,
                     value: customModel,
                   });
-                  setModelOptions(localModelOptions);
+                  setModelOptions(modelOptions=>{
+                    return [...modelOptions, ...localModelOptions];
+                  });
                   setCustomModel('');
                   handleInputChange(null, { name: 'models', value: localModels });
                 }}>填入</Button>
               }
