Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-31 06:13:43 +08:00)
Compare commits: 62d32f317d...update-max (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | ebe617b733 |  |
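The single commit renames the max_tokens request parameter to max_completions_tokens in the OpenAI client's RequestPayload and its vision-model branch, in DEFAULT_CONFIG, and in ModalConfigValidator: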
```diff
@@ -63,7 +63,7 @@ export interface RequestPayload {
   presence_penalty: number;
   frequency_penalty: number;
   top_p: number;
-  max_tokens?: number;
+  max_completions_tokens?: number;
 }
 
 export interface DalleRequestPayload {
@@ -228,13 +228,16 @@ export class ChatGPTApi implements LLMApi {
         presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
         frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
         top_p: !isO1 ? modelConfig.top_p : 1,
-        // max_tokens: Math.max(modelConfig.max_tokens, 1024),
-        // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
+        // max_completions_tokens: Math.max(modelConfig.max_completions_tokens, 1024),
+        // Please do not ask me why not send max_completions_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
-      // add max_tokens to vision model
+      // add max_completions_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        requestPayload["max_completions_tokens"] = Math.max(
+          modelConfig.max_completions_tokens,
+          4000,
+        );
       }
     }
```
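To make the hunk's effect concrete, here is a minimal TypeScript sketch of the resulting behavior: the renamed field is omitted from regular requests and only attached for vision models, floored at 4000. `buildPayload` and the `Sketch*` types are illustrative stand-ins distilled from the diff, not code from the repository, where the payload is assembled inside `ChatGPTApi` with the `isO1` checks shown above.

```typescript
// Minimal sketch (assumed shapes, not the repository's actual helper).
interface SketchRequestPayload {
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_completions_tokens?: number;
}

interface SketchModelConfig {
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_completions_tokens: number;
}

function buildPayload(
  modelConfig: SketchModelConfig,
  visionModel: boolean,
): SketchRequestPayload {
  const requestPayload: SketchRequestPayload = {
    presence_penalty: modelConfig.presence_penalty,
    frequency_penalty: modelConfig.frequency_penalty,
    top_p: modelConfig.top_p,
    // max_completions_tokens is deliberately not sent for regular models,
    // mirroring the commented-out line in the hunk above.
  };

  if (visionModel) {
    // Vision models do get the field, with a floor of 4000 as in the diff.
    requestPayload.max_completions_tokens = Math.max(
      modelConfig.max_completions_tokens,
      4000,
    );
  }

  return requestPayload;
}
```

The second file in the compare updates the default model config and its validator to use the renamed key: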
```diff
@@ -65,7 +65,7 @@ export const DEFAULT_CONFIG = {
     providerName: "OpenAI" as ServiceProvider,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 4000,
+    max_completions_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
@@ -127,7 +127,7 @@ export const ModalConfigValidator = {
   model(x: string) {
     return x as ModelType;
   },
-  max_tokens(x: number) {
+  max_completions_tokens(x: number) {
     return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {
```
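The validator caps whatever value the user configures. `limitNumber`'s implementation is not part of this compare view, so the sketch below assumes the common clamp-with-fallback reading of `limitNumber(x, min, max, fallback)` purely for illustration.

```typescript
// Hypothetical stand-in for the limitNumber helper referenced by the validator:
// clamp x into [min, max], falling back to def when x is not a finite number.
function limitNumber(x: number, min: number, max: number, def: number): number {
  if (!Number.isFinite(x)) return def;
  return Math.min(max, Math.max(min, x));
}

// Validator entry as renamed in the diff; clamps user input to 0..512000.
const ModalConfigValidator = {
  max_completions_tokens(x: number) {
    return limitNumber(x, 0, 512000, 1024);
  },
};

// Example: out-of-range values are clamped, invalid input falls back to 1024.
console.log(ModalConfigValidator.max_completions_tokens(999999)); // 512000
console.log(ModalConfigValidator.max_completions_tokens(NaN));    // 1024
```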