mirror of
				https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
				synced 2025-10-31 14:23:43 +08:00 
			
		
		
		
	Compare commits
	
		
			11 Commits
		
	
	
		
			feat/markd
			...
			4faed83349
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 4faed83349 | ||
|  | 11b37c15bd | ||
|  | 1d0038f17d | ||
|  | 619fa519c0 | ||
|  | 69fcb92a3b | ||
|  | 3b5b496599 | ||
|  | 2db4caace4 | ||
|  | fd8ad63655 | ||
|  | c41c2b538a | ||
|  | 4d6c82deb9 | ||
|  | b09f458aeb | 
| @@ -308,11 +308,14 @@ If you want to disable parse settings from url, set this to 1. | ||||
|  | ||||
| To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. | ||||
|  | ||||
| User `-all` to disable all default models, `+all` to enable all default models. | ||||
| Use `-all` to disable all default models, `+all` to enable all default models. | ||||
| Use `-*provider` to disable all models from the specified provider.  | ||||
| Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come. | ||||
|  | ||||
| For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name. | ||||
| > Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list. | ||||
| > If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list. | ||||
| > If you don't want to use Azure model, using `-*azure` will prevent Azure models from appearing in the model list. | ||||
|  | ||||
| For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. | ||||
| > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. | ||||
|   | ||||
| @@ -417,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = { | ||||
|   "gpt-4-turbo": "2023-12", | ||||
|   "gpt-4-turbo-2024-04-09": "2023-12", | ||||
|   "gpt-4-turbo-preview": "2023-12", | ||||
|   "gpt-4.1": "2024-06", | ||||
|   "gpt-4.1-2025-04-14": "2024-06", | ||||
|   "gpt-4.1-mini": "2024-06", | ||||
|   "gpt-4.1-mini-2025-04-14": "2024-06", | ||||
|   "gpt-4.1-nano": "2024-06", | ||||
|   "gpt-4.1-nano-2025-04-14": "2024-06", | ||||
|   "gpt-4.5-preview": "2023-10", | ||||
|   "gpt-4.5-preview-2025-02-27": "2023-10", | ||||
|   "gpt-4o": "2023-10", | ||||
|   "gpt-4o-2024-05-13": "2023-10", | ||||
|   "gpt-4o-2024-08-06": "2023-10", | ||||
| @@ -458,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [ | ||||
| export const VISION_MODEL_REGEXES = [ | ||||
|   /vision/, | ||||
|   /gpt-4o/, | ||||
|   /gpt-4\.1/, | ||||
|   /claude-3/, | ||||
|   /gemini-1\.5/, | ||||
|   /gemini-exp/, | ||||
| @@ -485,6 +494,14 @@ const openaiModels = [ | ||||
|   "gpt-4-32k-0613", | ||||
|   "gpt-4-turbo", | ||||
|   "gpt-4-turbo-preview", | ||||
|   "gpt-4.1", | ||||
|   "gpt-4.1-2025-04-14", | ||||
|   "gpt-4.1-mini", | ||||
|   "gpt-4.1-mini-2025-04-14", | ||||
|   "gpt-4.1-nano", | ||||
|   "gpt-4.1-nano-2025-04-14", | ||||
|   "gpt-4.5-preview", | ||||
|   "gpt-4.5-preview-2025-02-27", | ||||
|   "gpt-4o", | ||||
|   "gpt-4o-2024-05-13", | ||||
|   "gpt-4o-2024-08-06", | ||||
|   | ||||
| @@ -76,6 +76,7 @@ export function collectModelTable( | ||||
|   // server custom models | ||||
|   customModels | ||||
|     .split(",") | ||||
|     .map((v) => v.trim()) | ||||
|     .filter((v) => !!v && v.length > 0) | ||||
|     .forEach((m) => { | ||||
|       const available = !m.startsWith("-"); | ||||
| @@ -88,6 +89,13 @@ export function collectModelTable( | ||||
|         Object.values(modelTable).forEach( | ||||
|           (model) => (model.available = available), | ||||
|         ); | ||||
|       } else if (name.startsWith("*")) { | ||||
|         const modelId = name.substring(1).toLowerCase(); | ||||
|         Object.values(modelTable).forEach((model) => { | ||||
|           if (model?.provider?.id === modelId) { | ||||
|             model.available = available; | ||||
|           } | ||||
|         }); | ||||
|       } else { | ||||
|         // 1. find model by name, and set available value | ||||
|         const [customModelName, customProviderName] = getModelProvider(name); | ||||
|   | ||||
							
								
								
									
										142
									
								
								test/model.test.ts
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										142
									
								
								test/model.test.ts
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,142 @@ | ||||
| import { collectModelTable } from "@/app/utils/model" | ||||
| import { LLMModel,LLMModelProvider } from "@/app/client/api"; | ||||
|  | ||||
| describe('collectModelTable', () => { | ||||
|   const mockModels: readonly LLMModel[] = [ | ||||
|     { | ||||
|       name: 'gpt-3.5-turbo', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'openai', | ||||
|         providerName: 'OpenAI', | ||||
|         providerType: 'openai', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 1, | ||||
|     }, | ||||
|     { | ||||
|       name: 'gpt-4', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'openai', | ||||
|         providerName: 'OpenAI', | ||||
|         providerType: 'openai', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 1, | ||||
|     }, | ||||
|     { | ||||
|       name: 'gpt-3.5-turbo', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'azure', | ||||
|         providerName: 'Azure', | ||||
|         providerType: 'azure', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 2, | ||||
|     }, | ||||
|     { | ||||
|       name: 'gpt-4', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'azure', | ||||
|         providerName: 'Azure', | ||||
|         providerType: 'azure', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 2, | ||||
|     }, | ||||
|     { | ||||
|       name: 'gemini-pro', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'google', | ||||
|         providerName: 'Google', | ||||
|         providerType: 'google', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 3, | ||||
|     }, | ||||
|     { | ||||
|       name: 'claude-3-haiku-20240307', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'anthropic', | ||||
|         providerName: 'Anthropic', | ||||
|         providerType: 'anthropic', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 4, | ||||
|     }, | ||||
|     { | ||||
|       name: 'grok-beta', | ||||
|       available: true, | ||||
|       provider: { | ||||
|         id: 'xai', | ||||
|         providerName: 'XAI', | ||||
|         providerType: 'xai', | ||||
|       } as LLMModelProvider, | ||||
|       sorted: 11, | ||||
|     }, | ||||
|   ]; | ||||
|  | ||||
|   test('all models shoule be available', () => { | ||||
|     const customModels = ''; | ||||
|     const result = collectModelTable(mockModels, customModels); | ||||
|  | ||||
|     expect(result['gpt-3.5-turbo@openai'].available).toBe(true); | ||||
|     expect(result['gpt-4@openai'].available).toBe(true); | ||||
|     expect(result['gpt-3.5-turbo@azure'].available).toBe(true); | ||||
|     expect(result['gpt-4@azure'].available).toBe(true); | ||||
|     expect(result['gemini-pro@google'].available).toBe(true); | ||||
|     expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); | ||||
|     expect(result['grok-beta@xai'].available).toBe(true); | ||||
|   }); | ||||
|   test('should exclude all models when custom is "-all"', () => { | ||||
|     const customModels = '-all'; | ||||
|     const result = collectModelTable(mockModels, customModels); | ||||
|  | ||||
|     expect(result['gpt-3.5-turbo@openai'].available).toBe(false); | ||||
|     expect(result['gpt-4@openai'].available).toBe(false); | ||||
|     expect(result['gpt-3.5-turbo@azure'].available).toBe(false); | ||||
|     expect(result['gpt-4@azure'].available).toBe(false); | ||||
|     expect(result['gemini-pro@google'].available).toBe(false); | ||||
|     expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); | ||||
|     expect(result['grok-beta@xai'].available).toBe(false); | ||||
|   }); | ||||
|  | ||||
|   test('should exclude all Azure models when custom is "-*azure"', () => { | ||||
|     const customModels = '-*azure'; | ||||
|     const result = collectModelTable(mockModels, customModels); | ||||
|  | ||||
|     expect(result['gpt-3.5-turbo@openai'].available).toBe(true); | ||||
|     expect(result['gpt-4@openai'].available).toBe(true); | ||||
|     expect(result['gpt-3.5-turbo@azure'].available).toBe(false); | ||||
|     expect(result['gpt-4@azure'].available).toBe(false); | ||||
|     expect(result['gemini-pro@google'].available).toBe(true); | ||||
|     expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); | ||||
|     expect(result['grok-beta@xai'].available).toBe(true); | ||||
|   }); | ||||
|  | ||||
|   test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => { | ||||
|     const customModels = '-*google,-*xai'; | ||||
|     const result = collectModelTable(mockModels, customModels); | ||||
|  | ||||
|     expect(result['gpt-3.5-turbo@openai'].available).toBe(true); | ||||
|     expect(result['gpt-4@openai'].available).toBe(true); | ||||
|     expect(result['gpt-3.5-turbo@azure'].available).toBe(true); | ||||
|     expect(result['gpt-4@azure'].available).toBe(true); | ||||
|     expect(result['gemini-pro@google'].available).toBe(false); | ||||
|     expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); | ||||
|     expect(result['grok-beta@xai'].available).toBe(false); | ||||
|   }); | ||||
|  | ||||
|   test('All models except OpenAI should be excluded, and additional models should be added when customized as "-all, +*openai,gpt-4o@azure"', () => { | ||||
|     const customModels = '-all,+*openai,gpt-4o@azure'; | ||||
|     const result = collectModelTable(mockModels, customModels); | ||||
|  | ||||
|     expect(result['gpt-3.5-turbo@openai'].available).toBe(true); | ||||
|     expect(result['gpt-4@openai'].available).toBe(true); | ||||
|     expect(result['gpt-3.5-turbo@azure'].available).toBe(false); | ||||
|     expect(result['gpt-4@azure'].available).toBe(false); | ||||
|     expect(result['gemini-pro@google'].available).toBe(false); | ||||
|     expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); | ||||
|     expect(result['grok-beta@xai'].available).toBe(false); | ||||
|     expect(result['gpt-4o@azure'].available).toBe(true); | ||||
|   }); | ||||
| }); | ||||
		Reference in New Issue
	
	Block a user