Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 08:13:43 +08:00)
	use stream when request o1
@@ -224,7 +224,7 @@ export class ChatGPTApi implements LLMApi {
       // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet.
       requestPayload = {
         messages,
-        stream: !isO1 ? options.config.stream : false,
+        stream: options.config.stream,
         model: modelConfig.model,
         temperature: !isO1 ? modelConfig.temperature : 1,
         presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
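For context around this first hunk: the change passes the caller's stream setting through for o1 instead of forcing stream: false, while the other o1 parameter overrides stay in place. The sketch below is not the project's file, only a self-contained illustration of that logic; the simplified types and the derivation of isO1 from the model name are assumptions.

// Not part of the commit: a self-contained sketch of the payload logic after this
// change. The types and the isO1 derivation from the model name are assumptions.
interface SketchModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
}

function buildPayload(
  messages: { role: string; content: string }[],
  modelConfig: SketchModelConfig,
  config: { stream: boolean },
) {
  const isO1 = modelConfig.model.startsWith("o1"); // assumption: flag derived from the model name
  return {
    messages,
    // After this commit the stream preference is passed through for o1 as well,
    // instead of being forced to false.
    stream: config.stream,
    model: modelConfig.model,
    // o1 still ignores sampling controls, so fixed values are sent for it.
    temperature: !isO1 ? modelConfig.temperature : 1,
    presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
  };
}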
@@ -247,7 +247,7 @@ export class ChatGPTApi implements LLMApi {
 
     console.log("[Request] openai payload: ", requestPayload);
 
-    const shouldStream = !isDalle3 && !!options.config.stream && !isO1;
+    const shouldStream = !isDalle3 && !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
 
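The second hunk drops the !isO1 guard from shouldStream, so an o1 request takes the same streaming path as other chat models whenever the caller asked for streaming (DALL-E 3 requests still never stream). Below is a hedged sketch of how such a flag and the AbortController could drive the request; the endpoint, headers, and the raw response handling are illustrative placeholders, not the project's actual code.

// Not part of the commit: an illustrative sketch only. Endpoint, headers, and the
// response handling are placeholders, not ChatGPT-Next-Web's implementation.
async function sendChat(
  payload: unknown,
  shouldStream: boolean,
  onUpdate: (text: string) => void,
): Promise<void> {
  // Mirrors the hand-off in the diff: the controller lets the UI abort the request.
  const controller = new AbortController();

  // A real request would also send an Authorization header with the API key.
  const res = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
    signal: controller.signal,
  });

  if (!shouldStream) {
    // Non-streaming path: a single JSON body carries the whole completion.
    const json = await res.json();
    onUpdate(json.choices?.[0]?.message?.content ?? "");
    return;
  }

  // Streaming path: read the body incrementally; real code would parse the
  // "data:" lines of the SSE stream into message deltas.
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onUpdate(decoder.decode(value, { stream: true }));
  }
}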