Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-31 13:53:41 +08:00)

Compare commits: v0.5.10-al ... v0.5.10-al

11 Commits
| SHA1 |
|---|
| 67c64e71c8 |
| 97030e27f8 |
| 461f5dab56 |
| af378c59af |
| bc6769826b |
| 0fe26cc4bd |
| 7d6a169669 |
| 66f06e5d6f |
| 6acb9537a9 |
| 7069c49bdf |
| 58dee76bf7 |
@@ -73,13 +73,7 @@ _✨ Access all major LLMs through the standard OpenAI API format, ready to use out of the box
    + [x] [Zhipu ChatGLM series models](https://bigmodel.cn)
    + [x] [360 Zhinao](https://ai.360.cn)
    + [x] [Tencent Hunyuan LLM](https://cloud.tencent.com/document/product/1729)
-2. Supports configuring mirrors and many third-party proxy services:
-   + [x] [OpenAI-SB](https://openai-sb.com)
-   + [x] [CloseAI](https://referer.shadowai.xyz/r/2412)
-   + [x] [API2D](https://api2d.com/r/197971)
-   + [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf)
-   + [x] [AI Proxy](https://aiproxy.io/?i=OneAPI) (invite code: `OneAPI`)
-   + [x] Custom channels: e.g. various third-party proxy services not listed here
+2. Supports configuring mirrors and many [third-party proxy services](https://iamazing.cn/page/openai-api-third-party-services).
 3. Supports accessing multiple channels via **load balancing**.
 4. Supports **stream mode**, enabling a typewriter effect through streaming.
 5. Supports **multi-node deployment**, [see here](#多机部署).
@@ -371,6 +365,7 @@ graph LR
     + `TIKTOKEN_CACHE_DIR`: at startup the program downloads token encodings for some common models (e.g. `gpt-3.5-turbo`) over the network; on unstable networks or offline machines this can break startup. Set this directory to cache the data; it can also be copied to an offline environment.
     + `DATA_GYM_CACHE_DIR`: currently serves the same purpose as `TIKTOKEN_CACHE_DIR`, but with lower priority.
 15. `RELAY_TIMEOUT`: relay timeout in seconds; by default no timeout is set.
+16. `SQLITE_BUSY_TIMEOUT`: SQLite lock-wait timeout in milliseconds, default `3000`.
 
 ### Command-line arguments
 1. `--port <port_number>`: the port number the server listens on, default `3000`.

@@ -4,3 +4,4 @@ var UsingSQLite = false
 var UsingPostgreSQL = false
 
 var SQLitePath = "one-api.db"
+var SQLiteBusyTimeout = GetOrDefault("SQLITE_BUSY_TIMEOUT", 3000)

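The new default relies on `GetOrDefault`, the project's env-with-fallback helper. A minimal sketch of such a helper for readers unfamiliar with it (assumed behavior; the actual implementation in `common` may differ in logging and edge cases):

```go
package common

import (
	"os"
	"strconv"
)

// GetOrDefault returns the integer value of the environment variable env,
// or defaultValue when the variable is unset or not a valid integer.
// Minimal sketch; one-api's real helper may differ.
func GetOrDefault(env string, defaultValue int) int {
	raw := os.Getenv(env)
	if raw == "" {
		return defaultValue
	}
	value, err := strconv.Atoi(raw)
	if err != nil {
		return defaultValue
	}
	return value
}
```
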
@@ -88,8 +88,10 @@ var ModelRatio = map[string]float64{
 	"chatglm_pro":               0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":               0.3572, // ¥0.005 / 1k tokens
 	"chatglm_lite":              0.1429, // ¥0.002 / 1k tokens
-	"qwen-turbo":                0.8572, // ¥0.012 / 1k tokens
-	"qwen-plus":                 10,     // ¥0.14 / 1k tokens
+	"qwen-turbo":                0.5715, // ¥0.008 / 1k tokens  // https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing
+	"qwen-plus":                 1.4286, // ¥0.02 / 1k tokens
+	"qwen-max":                  1.4286, // ¥0.02 / 1k tokens
+	"qwen-max-longcontext":      1.4286, // ¥0.02 / 1k tokens
 	"text-embedding-v1":         0.05,   // ¥0.0007 / 1k tokens
 	"SparkDesk":                 1.2858, // ¥0.018 / 1k tokens
 	"360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens

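These values follow the map's convention that a ratio of 1.0 corresponds to $0.002 per 1K tokens; the CNY comments line up with that under an assumed exchange rate of about ¥7 per USD (the repo rounds some entries up, e.g. 0.5715 rather than 0.5714 for ¥0.008). A sketch of the conversion, with the exchange rate as an assumption:

```go
package main

import "fmt"

// ratioFromCNY converts a CNY price per 1K tokens into the ModelRatio
// unit, where 1.0 corresponds to $0.002 / 1K tokens. The USD/CNY rate
// of 7 is an assumption used for illustration only.
func ratioFromCNY(cnyPer1K float64) float64 {
	const usdCnyRate = 7.0
	const unitUSDPer1K = 0.002
	return cnyPer1K / usdCnyRate / unitUSDPer1K
}

func main() {
	fmt.Printf("qwen-turbo (¥0.008): %.4f\n", ratioFromCNY(0.008)) // ≈ 0.5714
	fmt.Printf("qwen-plus  (¥0.02):  %.4f\n", ratioFromCNY(0.02))  // ≈ 1.4286
}
```
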
@@ -486,6 +486,24 @@ func init() {
 			Root:       "qwen-plus",
 			Parent:     nil,
 		},
+		{
+			Id:         "qwen-max",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "ali",
+			Permission: permission,
+			Root:       "qwen-max",
+			Parent:     nil,
+		},
+		{
+			Id:         "qwen-max-longcontext",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "ali",
+			Permission: permission,
+			Root:       "qwen-max-longcontext",
+			Parent:     nil,
+		},
 		{
 			Id:         "text-embedding-v1",
 			Object:     "model",

@@ -13,13 +13,13 @@ import (
 // https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r
 
 type AliMessage struct {
-	User string `json:"user"`
-	Bot  string `json:"bot"`
+	Content string `json:"content"`
+	Role    string `json:"role"`
 }
 
 type AliInput struct {
-	Prompt  string       `json:"prompt"`
-	History []AliMessage `json:"history"`
+	//Prompt   string       `json:"prompt"`
+	Messages []AliMessage `json:"messages"`
 }
 
 type AliParameters struct {
@@ -83,32 +83,17 @@ type AliChatResponse struct {
 
 func requestOpenAI2Ali(request GeneralOpenAIRequest) *AliChatRequest {
 	messages := make([]AliMessage, 0, len(request.Messages))
-	prompt := ""
 	for i := 0; i < len(request.Messages); i++ {
 		message := request.Messages[i]
-		if message.Role == "system" {
-			messages = append(messages, AliMessage{
-				User: message.StringContent(),
-				Bot:  "Okay",
-			})
-			continue
-		} else {
-			if i == len(request.Messages)-1 {
-				prompt = message.StringContent()
-				break
-			}
-			messages = append(messages, AliMessage{
-				User: message.StringContent(),
-				Bot:  request.Messages[i+1].StringContent(),
-			})
-			i++
-		}
+		messages = append(messages, AliMessage{
+			Content: message.StringContent(),
+			Role:    strings.ToLower(message.Role),
+		})
 	}
 	return &AliChatRequest{
 		Model: request.Model,
 		Input: AliInput{
-			Prompt:  prompt,
-			History: messages,
+			Messages: messages,
 		},
 		//Parameters: AliParameters{  // ChatGPT's parameters are not compatible with Ali's
 		//	TopP: request.TopP,

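With the new schema, the request body sent to Ali carries an OpenAI-style `messages` array under `input` instead of a prompt plus user/bot history pairs, and system prompts no longer need a synthetic "Okay" bot turn. A self-contained sketch of the new `input` payload (local copies of the types, for illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the request types, for illustration only.
type AliMessage struct {
	Content string `json:"content"`
	Role    string `json:"role"`
}

type AliInput struct {
	Messages []AliMessage `json:"messages"`
}

func main() {
	input := AliInput{Messages: []AliMessage{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Hello!"},
	}}
	payload, _ := json.MarshalIndent(input, "", "  ")
	// Prints the messages array exactly as the relay now sends it.
	fmt.Println(string(payload))
}
```
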
@@ -1,11 +1,13 @@
 package controller
 
 import (
+	"bufio"
 	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
 	"one-api/common"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 )
@@ -112,7 +114,7 @@ func requestOpenAI2Gemini(textRequest GeneralOpenAIRequest) *GeminiChatRequest {
 				Role: "model",
 				Parts: []GeminiPart{
 					{
-						Text: "ok",
+						Text: "Okay",
 					},
 				},
 			})
@@ -128,6 +130,16 @@ type GeminiChatResponse struct {
 	PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
 }
 
+func (g *GeminiChatResponse) GetResponseText() string {
+	if g == nil {
+		return ""
+	}
+	if len(g.Candidates) > 0 && len(g.Candidates[0].Content.Parts) > 0 {
+		return g.Candidates[0].Content.Parts[0].Text
+	}
+	return ""
+}
+
 type GeminiChatCandidate struct {
 	Content       GeminiChatContent        `json:"content"`
 	FinishReason  string                   `json:"finishReason"`
@@ -156,10 +168,13 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *OpenAITextResponse
 			Index: i,
 			Message: Message{
 				Role:    "assistant",
-				Content: candidate.Content.Parts[0].Text,
+				Content: "",
 			},
 			FinishReason: stopFinishReason,
 		}
+		if len(candidate.Content.Parts) > 0 {
+			choice.Message.Content = candidate.Content.Parts[0].Text
+		}
 		fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
 	}
 	return &fullTextResponse
@@ -167,9 +182,7 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *OpenAITextResponse
 
 func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *ChatCompletionsStreamResponse {
 	var choice ChatCompletionsStreamResponseChoice
-	if len(geminiResponse.Candidates) > 0 && len(geminiResponse.Candidates[0].Content.Parts) > 0 {
-		choice.Delta.Content = geminiResponse.Candidates[0].Content.Parts[0].Text
-	}
+	choice.Delta.Content = geminiResponse.GetResponseText()
 	choice.FinishReason = &stopFinishReason
 	var response ChatCompletionsStreamResponse
 	response.Object = "chat.completion.chunk"
@@ -180,50 +193,61 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *ChatCo
 
 func geminiChatStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
 	responseText := ""
-	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
-	createdTime := common.GetTimestamp()
 	dataChan := make(chan string)
 	stopChan := make(chan bool)
+	scanner := bufio.NewScanner(resp.Body)
+	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		if atEOF && len(data) == 0 {
+			return 0, nil, nil
+		}
+		if i := strings.Index(string(data), "\n"); i >= 0 {
+			return i + 1, data[0:i], nil
+		}
+		if atEOF {
+			return len(data), data, nil
+		}
+		return 0, nil, nil
+	})
 	go func() {
-		responseBody, err := io.ReadAll(resp.Body)
-		if err != nil {
-			common.SysError("error reading stream response: " + err.Error())
-			stopChan <- true
-			return
+		for scanner.Scan() {
+			data := scanner.Text()
+			data = strings.TrimSpace(data)
+			if !strings.HasPrefix(data, "\"text\": \"") {
+				continue
+			}
+			data = strings.TrimPrefix(data, "\"text\": \"")
+			data = strings.TrimSuffix(data, "\"")
+			dataChan <- data
 		}
-		err = resp.Body.Close()
-		if err != nil {
-			common.SysError("error closing stream response: " + err.Error())
-			stopChan <- true
-			return
-		}
-		var geminiResponse GeminiChatResponse
-		err = json.Unmarshal(responseBody, &geminiResponse)
-		if err != nil {
-			common.SysError("error unmarshalling stream response: " + err.Error())
-			stopChan <- true
-			return
-		}
-		fullTextResponse := streamResponseGeminiChat2OpenAI(&geminiResponse)
-		fullTextResponse.Id = responseId
-		fullTextResponse.Created = createdTime
-		if len(geminiResponse.Candidates) > 0 && len(geminiResponse.Candidates[0].Content.Parts) > 0 {
-			responseText += geminiResponse.Candidates[0].Content.Parts[0].Text
-		}
-		jsonResponse, err := json.Marshal(fullTextResponse)
-		if err != nil {
-			common.SysError("error marshalling stream response: " + err.Error())
-			stopChan <- true
-			return
-		}
-		dataChan <- string(jsonResponse)
 		stopChan <- true
 	}()
 	setEventStreamHeaders(c)
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
-			c.Render(-1, common.CustomEvent{Data: "data: " + data})
+			// this is used to prevent annoying \ related format bug
+			data = fmt.Sprintf("{\"content\": \"%s\"}", data)
+			type dummyStruct struct {
+				Content string `json:"content"`
+			}
+			var dummy dummyStruct
+			err := json.Unmarshal([]byte(data), &dummy)
+			responseText += dummy.Content
+			var choice ChatCompletionsStreamResponseChoice
+			choice.Delta.Content = dummy.Content
+			response := ChatCompletionsStreamResponse{
+				Id:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
+				Object:  "chat.completion.chunk",
+				Created: common.GetTimestamp(),
+				Model:   "gemini-pro",
+				Choices: []ChatCompletionsStreamResponseChoice{choice},
+			}
+			jsonResponse, err := json.Marshal(response)
+			if err != nil {
+				common.SysError("error marshalling stream response: " + err.Error())
+				return true
+			}
+			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
 			return true
 		case <-stopChan:
 			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
@@ -263,7 +287,7 @@ func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, mo
 		}, nil
 	}
 	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
-	completionTokens := countTokenText(geminiResponse.Candidates[0].Content.Parts[0].Text, model)
+	completionTokens := countTokenText(geminiResponse.GetResponseText(), model)
 	usage := Usage{
 		PromptTokens:     promptTokens,
 		CompletionTokens: completionTokens,

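The `dummyStruct` round-trip in the new stream handler is a trick for unescaping the raw, still-JSON-escaped `"text": "..."` fragments scanned out of the Gemini stream: wrapping a fragment back into a JSON object and unmarshalling it lets `encoding/json` resolve `\n`, `\"`, and similar escapes. A standalone sketch of the same trick:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A raw fragment as scanned from the stream, still JSON-escaped.
	raw := `Hello\nworld \"quoted\"`
	// Wrap it back into a JSON object and unmarshal to unescape it.
	wrapped := fmt.Sprintf("{\"content\": \"%s\"}", raw)
	var dummy struct {
		Content string `json:"content"`
	}
	if err := json.Unmarshal([]byte(wrapped), &dummy); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", dummy.Content) // "Hello\nworld \"quoted\""
}
```
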
@@ -19,7 +19,6 @@ func isWithinRange(element string, value int) bool {
 	if _, ok := common.DalleGenerationImageAmounts[element]; !ok {
 		return false
 	}
-
 	min := common.DalleGenerationImageAmounts[element][0]
 	max := common.DalleGenerationImageAmounts[element][1]
 
@@ -42,6 +41,10 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 		return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
 	}
 
+	if imageRequest.N == 0 {
+		imageRequest.N = 1
+	}
+
 	// Size validation
 	if imageRequest.Size != "" {
 		imageSize = imageRequest.Size
@@ -79,7 +82,10 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 
 	// Number of generated images validation
 	if isWithinRange(imageModel, imageRequest.N) == false {
-		return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+		// channel not azure
+		if channelType != common.ChannelTypeAzure {
+			return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+		}
 	}
 
 	// map model name
@@ -102,7 +108,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 		baseURL = c.GetString("base_url")
 	}
 	fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
-	if channelType == common.ChannelTypeAzure && relayMode == RelayModeImagesGenerations {
+	if channelType == common.ChannelTypeAzure {
 		// https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
 		apiVersion := GetAPIVersion(c)
 		// https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2023-06-01-preview

@@ -58,6 +58,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	if err != nil {
 		return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
 	}
+	if textRequest.MaxTokens < 0 || textRequest.MaxTokens > math.MaxInt32/2 {
+		return errorWrapper(errors.New("max_tokens is invalid"), "invalid_max_tokens", http.StatusBadRequest)
+	}
 	if relayMode == RelayModeModerations && textRequest.Model == "" {
 		textRequest.Model = "text-moderation-latest"
 	}
@@ -190,10 +193,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			version = c.GetString("api_version")
 		}
 		action := "generateContent"
-		// actually gemini does not support stream, it's fake
-		//if textRequest.Stream {
-		//	action = "streamGenerateContent"
-		//}
+		if textRequest.Stream {
+			action = "streamGenerateContent"
+		}
 		fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
 		apiKey := c.Request.Header.Get("Authorization")
 		apiKey = strings.TrimPrefix(apiKey, "Bearer ")

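With real streaming enabled, the action segment of the upstream URL now depends on `textRequest.Stream`. A sketch of the two resulting URL forms, assuming Google's public endpoint and the `v1` API version (both are configurable in the real handler):

```go
package main

import "fmt"

func main() {
	// Assumed values for illustration; the real handler takes these
	// from the channel configuration and the incoming request.
	requestBaseURL := "https://generativelanguage.googleapis.com"
	version := "v1"
	model := "gemini-pro"
	for _, stream := range []bool{false, true} {
		action := "generateContent"
		if stream {
			action = "streamGenerateContent"
		}
		fmt.Printf("%s/%s/models/%s:%s\n", requestBaseURL, version, model, action)
	}
	// https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent
	// https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent
}
```
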
@@ -263,11 +263,52 @@ func setEventStreamHeaders(c *gin.Context) {
 	c.Writer.Header().Set("X-Accel-Buffering", "no")
 }
 
+type GeneralErrorResponse struct {
+	Error    OpenAIError `json:"error"`
+	Message  string      `json:"message"`
+	Msg      string      `json:"msg"`
+	Err      string      `json:"err"`
+	ErrorMsg string      `json:"error_msg"`
+	Header   struct {
+		Message string `json:"message"`
+	} `json:"header"`
+	Response struct {
+		Error struct {
+			Message string `json:"message"`
+		} `json:"error"`
+	} `json:"response"`
+}
+
+func (e GeneralErrorResponse) ToMessage() string {
+	if e.Error.Message != "" {
+		return e.Error.Message
+	}
+	if e.Message != "" {
+		return e.Message
+	}
+	if e.Msg != "" {
+		return e.Msg
+	}
+	if e.Err != "" {
+		return e.Err
+	}
+	if e.ErrorMsg != "" {
+		return e.ErrorMsg
+	}
+	if e.Header.Message != "" {
+		return e.Header.Message
+	}
+	if e.Response.Error.Message != "" {
+		return e.Response.Error.Message
+	}
+	return ""
+}
+
 func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
 	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
 		StatusCode: resp.StatusCode,
 		OpenAIError: OpenAIError{
-			Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
+			Message: "",
 			Type:    "upstream_error",
 			Code:    "bad_response_status_code",
 			Param:   strconv.Itoa(resp.StatusCode),
@@ -281,12 +322,20 @@ func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIEr
 	if err != nil {
 		return
 	}
-	var textResponse TextResponse
-	err = json.Unmarshal(responseBody, &textResponse)
+	var errResponse GeneralErrorResponse
+	err = json.Unmarshal(responseBody, &errResponse)
 	if err != nil {
 		return
 	}
-	openAIErrorWithStatusCode.OpenAIError = textResponse.Error
+	if errResponse.Error.Message != "" {
+		// OpenAI format error, so we override the default one
+		openAIErrorWithStatusCode.OpenAIError = errResponse.Error
+	} else {
+		openAIErrorWithStatusCode.OpenAIError.Message = errResponse.ToMessage()
+	}
+	if openAIErrorWithStatusCode.OpenAIError.Message == "" {
+		openAIErrorWithStatusCode.OpenAIError.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
+	}
 	return
 }
 

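The `ToMessage` fallback chain lets the relay surface a readable message from several upstream error shapes (OpenAI-style `error.message`, bare `message`, `msg`, and so on) instead of the generic status-code text. A trimmed, self-contained sketch of how a non-OpenAI payload now resolves:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of GeneralErrorResponse, for illustration only.
type GeneralErrorResponse struct {
	Message string `json:"message"`
	Msg     string `json:"msg"`
}

// ToMessage returns the first non-empty candidate, mirroring the
// fallback order of the full implementation above.
func (e GeneralErrorResponse) ToMessage() string {
	if e.Message != "" {
		return e.Message
	}
	return e.Msg
}

func main() {
	body := []byte(`{"msg": "quota exceeded"}`)
	var errResponse GeneralErrorResponse
	if err := json.Unmarshal(body, &errResponse); err == nil {
		fmt.Println(errResponse.ToMessage()) // quota exceeded
	}
}
```
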
@@ -230,7 +230,13 @@ func xunfeiHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId strin
 		case stop = <-stopChan:
 		}
 	}
-
+	if len(xunfeiResponse.Payload.Choices.Text) == 0 {
+		xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{
+			{
+				Content: "",
+			},
+		}
+	}
 	xunfeiResponse.Payload.Choices.Text[0].Content = content
 
 	response := responseXunfei2OpenAI(&xunfeiResponse)

@@ -236,7 +236,7 @@ type ChatCompletionsStreamResponseChoice struct {
 	Delta struct {
 		Content string `json:"content"`
 	} `json:"delta"`
-	FinishReason *string `json:"finish_reason"`
+	FinishReason *string `json:"finish_reason,omitempty"`
 }
 
 type ChatCompletionsStreamResponse struct {

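With `omitempty`, a nil `FinishReason` pointer is dropped from the marshalled chunk instead of being serialized as `"finish_reason": null`. A quick sketch of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the stream choice type, for illustration only.
type ChatCompletionsStreamResponseChoice struct {
	Delta struct {
		Content string `json:"content"`
	} `json:"delta"`
	FinishReason *string `json:"finish_reason,omitempty"`
}

func main() {
	var choice ChatCompletionsStreamResponseChoice
	choice.Delta.Content = "Hi"
	b, _ := json.Marshal(choice)
	// Without omitempty this would also carry "finish_reason":null.
	fmt.Println(string(b)) // {"delta":{"content":"Hi"}}
}
```
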
@@ -1,6 +1,7 @@
 package model
 
 import (
+	"fmt"
 	"gorm.io/driver/mysql"
 	"gorm.io/driver/postgres"
 	"gorm.io/driver/sqlite"
@@ -59,7 +60,8 @@ func chooseDB() (*gorm.DB, error) {
 	// Use SQLite
 	common.SysLog("SQL_DSN not set, using SQLite as database")
 	common.UsingSQLite = true
-	return gorm.Open(sqlite.Open(common.SQLitePath), &gorm.Config{
+	config := fmt.Sprintf("?_busy_timeout=%d", common.SQLiteBusyTimeout)
+	return gorm.Open(sqlite.Open(common.SQLitePath+config), &gorm.Config{
 		PrepareStmt: true, // precompile SQL
 	})
 }

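The appended query parameter asks the SQLite driver to wait up to the configured number of milliseconds on a locked database before failing with a busy error. An end-to-end sketch of the resulting DSN, with the env handling inlined for illustration:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Env var -> busy timeout -> SQLite DSN, mirroring the change above.
	timeout := 3000 // default, as with SQLITE_BUSY_TIMEOUT unset
	if raw := os.Getenv("SQLITE_BUSY_TIMEOUT"); raw != "" {
		if v, err := strconv.Atoi(raw); err == nil {
			timeout = v
		}
	}
	dsn := "one-api.db" + fmt.Sprintf("?_busy_timeout=%d", timeout)
	fmt.Println(dsn) // one-api.db?_busy_timeout=3000 (with the default)
}
```
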
@@ -69,7 +69,7 @@ const EditChannel = () => {
           localModels = ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'];
           break;
         case 17:
-          localModels = ['qwen-turbo', 'qwen-plus', 'text-embedding-v1'];
+          localModels = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-longcontext', 'text-embedding-v1'];
           break;
         case 16:
           localModels = ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];