Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 16:23:41 +08:00)
Compare commits: f744dfa5d0 ... update-max (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | ebe617b733 |  |
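The single commit on `update-max` renames the request parameter `max_tokens` to `max_completions_tokens` in four places: the `RequestPayload` interface, the vision-model request path in `ChatGPTApi`, the `DEFAULT_CONFIG` defaults, and the `ModalConfigValidator`. The hunks below show each change in unified-diff form.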
```diff
@@ -63,7 +63,7 @@ export interface RequestPayload {
   presence_penalty: number;
   frequency_penalty: number;
   top_p: number;
-  max_tokens?: number;
+  max_completions_tokens?: number;
 }
 
 export interface DalleRequestPayload {
```
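For orientation, a minimal sketch of the interface after this hunk. Only the fields visible in the diff context are shown (the real interface has more members), and the call site below is hypothetical. Since the renamed field stays optional, payloads that never set a token cap keep compiling:

```typescript
// Sketch: RequestPayload after the rename (fields outside the hunk omitted).
export interface RequestPayload {
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_completions_tokens?: number; // optional, so omitting a cap stays legal
}

// Hypothetical call site: valid with or without the cap.
const payload: RequestPayload = {
  presence_penalty: 0,
  frequency_penalty: 0,
  top_p: 1,
};
payload.max_completions_tokens = 4000;
```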
```diff
@@ -228,13 +228,16 @@ export class ChatGPTApi implements LLMApi {
         presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
         frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
         top_p: !isO1 ? modelConfig.top_p : 1,
-        // max_tokens: Math.max(modelConfig.max_tokens, 1024),
-        // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
+        // max_completions_tokens: Math.max(modelConfig.max_completions_tokens, 1024),
+        // Please do not ask me why not send max_completions_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
-      // add max_tokens to vision model
+      // add max_completions_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        requestPayload["max_completions_tokens"] = Math.max(
+          modelConfig.max_completions_tokens,
+          4000,
+        );
       }
     }
```
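The only behavioral logic in this hunk is the vision-model floor; the rest is comment renames and a reflow of the `Math.max` call. A self-contained sketch of that branch follows; `ModelConfigLike` and `applyVisionTokenFloor` are hypothetical names used only for illustration:

```typescript
interface ModelConfigLike {
  max_completions_tokens: number;
}

// Mirrors the hunk: vision models always send a completion-token cap,
// floored at 4000 even if the configured value is lower; non-vision
// requests leave the field unset.
function applyVisionTokenFloor(
  requestPayload: Record<string, unknown>,
  modelConfig: ModelConfigLike,
  visionModel: boolean,
): void {
  if (visionModel) {
    requestPayload["max_completions_tokens"] = Math.max(
      modelConfig.max_completions_tokens,
      4000,
    );
  }
}
```

Raising the setting above 4000 is respected; lowering it below 4000 is silently bumped back up for vision models.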
```diff
@@ -65,7 +65,7 @@ export const DEFAULT_CONFIG = {
     providerName: "OpenAI" as ServiceProvider,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 4000,
+    max_completions_tokens: 4000,
    presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
```
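Worth noting: the renamed default (4000) equals the vision-model floor from the previous hunk, so out of the box the `Math.max` call changes nothing; it only matters once a user edits the setting. A quick check:

```typescript
const DEFAULT_MAX = 4000; // DEFAULT_CONFIG default after the rename

Math.max(DEFAULT_MAX, 4000); // => 4000, the floor is a no-op at the default
Math.max(1024, 4000); // => 4000, a lowered setting is bumped back up
Math.max(8000, 4000); // => 8000, a raised setting is respected
```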
```diff
@@ -127,7 +127,7 @@ export const ModalConfigValidator = {
   model(x: string) {
     return x as ModelType;
   },
-  max_tokens(x: number) {
+  max_completions_tokens(x: number) {
     return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {
```
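The validator body is unchanged; only the key is renamed. For context, a plausible sketch of `limitNumber`, inferred from the call site `limitNumber(x, 0, 512000, 1024)`: it presumably clamps `x` into `[min, max]` and falls back to the default for unusable input. This is an assumption, not the repo's verbatim implementation:

```typescript
// Assumed shape of limitNumber, inferred from the call above: clamp x to
// [min, max], or return defaultValue when x is not a usable number.
function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
): number {
  if (Number.isNaN(x)) return defaultValue;
  return Math.min(max, Math.max(min, x));
}

limitNumber(4000, 0, 512000, 1024); // => 4000 (in range, passed through)
limitNumber(-1, 0, 512000, 1024); // => 0 (clamped to the minimum)
limitNumber(9_000_000, 0, 512000, 1024); // => 512000 (clamped to the maximum)
limitNumber(NaN, 0, 512000, 1024); // => 1024 (falls back to the default)
```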