Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 16:23:41 +08:00)

Compare commits: 8 commits (dependabot...536ee8752d)
| Author | SHA1 | Date |
|---|---|---|
| | 536ee8752d | |
| | 69fcb92a3b | |
| | 3b5b496599 | |
| | 2db4caace4 | |
| | fd8ad63655 | |
| | c41c2b538a | |
| | 4d6c82deb9 | |
| | b09f458aeb | |
README.md

```diff
@@ -309,11 +309,14 @@ If you want to disable parse settings from url, set this to 1.
 
 To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma.
 
-User `-all` to disable all default models, `+all` to enable all default models.
+Use `-all` to disable all default models, `+all` to enable all default models.
+Use `-*provider` to disable specified models.
+Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come.
 
 For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
 > Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
 > If you only can use Azure model, `-all,+gpt-3.5-turbo@Azure=gpt35` will `gpt35(Azure)` the only option in model list.
+> If you don't want to use Azure model, using `-*azure` will prevent Azure models from appearing in the model list.
 
 For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
 > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
```
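The documented rules compose left to right: each comma-separated entry either toggles every default model (`all`), toggles everything from one provider (`*provider`), or adds/hides a single model, optionally mapping it to a display or deployment name. A minimal sketch of that interpretation with a hypothetical `CUSTOM_MODELS` value (an illustration of the documented syntax, not the project's actual parser):

```ts
// Illustrative walk over a hypothetical CUSTOM_MODELS value; only the syntax
// rules come from the README hunk above, the value and output are made up.
const customModels = "-all,+gpt-3.5-turbo@Azure=gpt35,-*google";

for (const entry of customModels.split(",").map((v) => v.trim())) {
  const available = !entry.startsWith("-");       // "-" hides, "+"/bare shows
  const body = entry.replace(/^[+-]/, "");
  if (body === "all") {
    console.log(`${available ? "enable" : "disable"} all default models`);
  } else if (body.startsWith("*")) {
    console.log(`${available ? "enable" : "disable"} provider ${body.slice(1)}`);
  } else {
    const [name, displayName] = body.split("=");  // e.g. gpt-3.5-turbo@Azure=gpt35
    console.log(`${available ? "add" : "hide"} ${name}${displayName ? ` as ${displayName}` : ""}`);
  }
}
```

The real application of these rules is `collectModelTable`, changed in the `app/utils/model.ts` hunks below.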
app/utils/model.ts

```diff
@@ -76,6 +76,7 @@ export function collectModelTable(
   // server custom models
   customModels
     .split(",")
+    .map((v) => v.trim())
     .filter((v) => !!v && v.length > 0)
     .forEach((m) => {
       const available = !m.startsWith("-");
@@ -88,6 +89,13 @@ export function collectModelTable(
         Object.values(modelTable).forEach(
           (model) => (model.available = available),
         );
+      } else if (name.startsWith("*")) {
+        const modelId = name.substring(1).toLowerCase();
+        Object.values(modelTable).forEach((model) => {
+          if (model?.provider?.id === modelId) {
+            model.available = available;
+          }
+        });
       } else {
         // 1. find model by name, and set available value
         const [customModelName, customProviderName] = getModelProvider(name);
```
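Pulled out of the second hunk above, the new branch matches entries of the form `*provider` against each model's `provider.id` and flips only those models' availability. A standalone sketch under simplified types (the `Entry` shape is an assumed stand-in for the app's model table values, not its real `LLMModel` type):

```ts
// Minimal reproduction of the "*provider" branch with a two-entry table.
type Entry = { available: boolean; provider?: { id: string } };

const modelTable: Record<string, Entry> = {
  "gpt-4@openai": { available: true, provider: { id: "openai" } },
  "gpt-4@azure": { available: true, provider: { id: "azure" } },
};

const rule = "-*azure";                     // one CUSTOM_MODELS entry
const available = !rule.startsWith("-");    // leading "-" disables
const name = rule.replace(/^[+-]/, "");     // "*azure"

if (name.startsWith("*")) {
  const modelId = name.substring(1).toLowerCase(); // "azure"
  Object.values(modelTable).forEach((model) => {
    if (model?.provider?.id === modelId) {
      model.available = available;          // only matching-provider entries change
    }
  });
}
// modelTable["gpt-4@azure"].available is now false; the OpenAI entry is untouched.
```

The `.map((v) => v.trim())` added in the first hunk also makes the parser tolerant of stray whitespace around the commas.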
							
								
								
									
test/model.test.ts (new file, 142 lines)

```ts
import { collectModelTable } from "@/app/utils/model"
import { LLMModel,LLMModelProvider } from "@/app/client/api";

describe('collectModelTable', () => {
  const mockModels: readonly LLMModel[] = [
    {
      name: 'gpt-3.5-turbo',
      available: true,
      provider: {
        id: 'openai',
        providerName: 'OpenAI',
        providerType: 'openai',
      } as LLMModelProvider,
      sorted: 1,
    },
    {
      name: 'gpt-4',
      available: true,
      provider: {
        id: 'openai',
        providerName: 'OpenAI',
        providerType: 'openai',
      } as LLMModelProvider,
      sorted: 1,
    },
    {
      name: 'gpt-3.5-turbo',
      available: true,
      provider: {
        id: 'azure',
        providerName: 'Azure',
        providerType: 'azure',
      } as LLMModelProvider,
      sorted: 2,
    },
    {
      name: 'gpt-4',
      available: true,
      provider: {
        id: 'azure',
        providerName: 'Azure',
        providerType: 'azure',
      } as LLMModelProvider,
      sorted: 2,
    },
    {
      name: 'gemini-pro',
      available: true,
      provider: {
        id: 'google',
        providerName: 'Google',
        providerType: 'google',
      } as LLMModelProvider,
      sorted: 3,
    },
    {
      name: 'claude-3-haiku-20240307',
      available: true,
      provider: {
        id: 'anthropic',
        providerName: 'Anthropic',
        providerType: 'anthropic',
      } as LLMModelProvider,
      sorted: 4,
    },
    {
      name: 'grok-beta',
      available: true,
      provider: {
        id: 'xai',
        providerName: 'XAI',
        providerType: 'xai',
      } as LLMModelProvider,
      sorted: 11,
    },
  ];

  test('all models shoule be available', () => {
    const customModels = '';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
    expect(result['gpt-4@azure'].available).toBe(true);
    expect(result['gemini-pro@google'].available).toBe(true);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(true);
  });
  test('should exclude all models when custom is "-all"', () => {
    const customModels = '-all';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(false);
    expect(result['gpt-4@openai'].available).toBe(false);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
    expect(result['grok-beta@xai'].available).toBe(false);
  });

  test('should exclude all Azure models when custom is "-*azure"', () => {
    const customModels = '-*azure';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(true);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(true);
  });

  test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => {
    const customModels = '-*google,-*xai';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
    expect(result['gpt-4@azure'].available).toBe(true);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(false);
  });

  test('All models except OpenAI should be excluded, and additional models should be added when customized as "-all, +*openai,gpt-4o@azure"', () => {
    const customModels = '-all,+*openai,gpt-4o@azure';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
    expect(result['grok-beta@xai'].available).toBe(false);
    expect(result['gpt-4o@azure'].available).toBe(true);
  });
});
```