Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 16:23:41 +08:00)
Merge pull request #6599 from DreamRivulet/add-support-GPT5

Some checks failed: Run Tests / test (push) has been cancelled
			add: model gpt-5
@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
+    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
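
For context: model-family detection here is a plain prefix match on the configured model name, so every published variant ("gpt-5", "gpt-5-mini", "gpt-5-chat", ...) trips the same flag. A minimal sketch of the check in isolation (detectModelFamily is a hypothetical name, not a helper in this codebase):

    // Sketch: the prefix checks above, pulled into a standalone function.
    function detectModelFamily(model: string) {
      const isO1OrO3 =
        model.startsWith("o1") ||
        model.startsWith("o3") ||
        model.startsWith("o4-mini");
      const isGpt5 = model.startsWith("gpt-5");
      return { isO1OrO3, isGpt5 };
    }

    // detectModelFamily("gpt-5-nano") => { isO1OrO3: false, isGpt5: true }
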
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
         messages,
         stream: options.config.stream,
         model: modelConfig.model,
-        temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+        temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
         presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
         frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
         top_p: !isO1OrO3 ? modelConfig.top_p : 1,
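
Worth noting: only temperature gains the !isGpt5 guard; presence_penalty, frequency_penalty, and top_p still gate on isO1OrO3 alone, so a gpt-5 request still sends the user-configured values for those three. A sketch of how the fields resolve for a gpt-5 model under this hunk (the modelConfig shape is an assumption mirroring the names in the diff):

    // Stand-in config; values are illustrative.
    const modelConfig = {
      temperature: 0.5,
      presence_penalty: 0,
      frequency_penalty: 0,
      top_p: 1,
    };

    // For a gpt-5 model: isGpt5 === true, isO1OrO3 === false, so:
    const resolved = {
      temperature: 1,                                   // pinned by the new guard
      presence_penalty: modelConfig.presence_penalty,   // still user-configured
      frequency_penalty: modelConfig.frequency_penalty, // still user-configured
      top_p: modelConfig.top_p,                         // still user-configured
    };
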
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
-      if (isO1OrO3) {
+      if (isGpt5) {
+        // Remove max_tokens if present
+        delete requestPayload.max_tokens;
+        // Use max_completion_tokens instead
+        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+
+      } else if (isO1OrO3) {
         // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
         // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
         // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
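
The new branch mirrors the convention the o1/o3 branch below it already follows: these models take max_completion_tokens rather than max_tokens, so the old field is deleted before the new one is set. A self-contained sketch of the swap (RequestPayload here is a loose stand-in for the real type defined elsewhere in this file):

    // Sketch of the field rename performed by the isGpt5 branch.
    type RequestPayload = { max_tokens?: number; [key: string]: unknown };

    function capCompletionTokens(payload: RequestPayload, cap: number): void {
      delete payload.max_tokens;              // drop the field gpt-5 does not take
      payload["max_completion_tokens"] = cap; // send this one instead
    }
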
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }
 
+
       // add max_tokens to vision model
-      if (visionModel && !isO1OrO3) {
+      if (visionModel && !isO1OrO3 && !isGpt5) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
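
Without the extra !isGpt5 here, the vision-model bump would re-add max_tokens immediately after the gpt-5 branch above deleted it. The ordering, reduced to a self-contained sketch (values are illustrative):

    const modelConfig = { max_tokens: 2000 };
    const requestPayload: { max_tokens?: number; max_completion_tokens?: number } =
      { max_tokens: 2000 };
    const isGpt5: boolean = true;
    const isO1OrO3: boolean = false;
    const visionModel: boolean = true;

    // The gpt-5 branch strips max_tokens first...
    if (isGpt5) {
      delete requestPayload.max_tokens;
      requestPayload.max_completion_tokens = modelConfig.max_tokens;
    }
    // ...so the vision bump must also skip gpt-5, or it would put the field back.
    if (visionModel && !isO1OrO3 && !isGpt5) {
      requestPayload.max_tokens = Math.max(modelConfig.max_tokens, 4000);
    }
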
@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
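
These regexes are tested against the model name, so /gpt-5/ marks every gpt-5 variant as vision-capable unless the exclude list also matches. A sketch of how such an include/exclude pair is typically consumed (checkVision is a hypothetical name, not the project's helper):

    const VISION_MODEL_REGEXES = [/o3/, /o4-mini/, /grok-4/i, /gpt-5/];
    const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

    function checkVision(model: string): boolean {
      return (
        VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
        !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
      );
    }

    // checkVision("gpt-5-mini") === true
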
@@ -517,6 +518,11 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
+  "gpt-5-chat",
+  "gpt-5-mini",
+  "gpt-5-nano",
+  "gpt-5",
+  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",