mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-09-30 23:26:39 +08:00

Compare commits: 6207c40ac9 ... 0487542c38 (14 commits)
Commits in range: 0487542c38, 995bef73de, 38ac502d80, 0511808900, 42eff644b4, 8ae6883784, c0f2ab6de3, 69fcb92a3b, 3b5b496599, 2db4caace4, fd8ad63655, c41c2b538a, 4d6c82deb9, b09f458aeb
@@ -313,12 +313,15 @@ If you want to disable parse settings from url, set this to 1.

 To control custom models, use `+` to add a custom model, use `-` to hide a model, and use `name=displayName` to customize a model's display name, separated by comma.

-User `-all` to disable all default models, `+all` to enable all default models.
+Use `-all` to disable all default models, `+all` to enable all default models.
+Use `-*provider` to disable all models from the specified provider.
+Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come.

 For Azure: use `modelName@Azure=deploymentName` to customize the model name and deployment name.

 > Example: `+gpt-3.5-turbo@Azure=gpt35` will show the option `gpt35(Azure)` in the model list.
 > If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
+> If you don't want to use Azure models, `-*azure` will prevent Azure models from appearing in the model list.

 For ByteDance: use `modelName@bytedance=deploymentName` to customize the model name and deployment name.
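A hedged example of how these switches combine, assuming the `CUSTOM_MODELS` environment variable that the surrounding README documents (the concrete values below are illustrative, not part of this hunk):

```shell
# Hide all default models, re-enable only the OpenAI defaults,
# and add one Azure deployment shown as "gpt35(Azure)" in the model list.
CUSTOM_MODELS="-all,+*openai,+gpt-3.5-turbo@Azure=gpt35"
```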
@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
+    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
         messages,
         stream: options.config.stream,
         model: modelConfig.model,
-        temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+        temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
         presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
         frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
         top_p: !isO1OrO3 ? modelConfig.top_p : 1,
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };

-      if (isO1OrO3) {
+      if (isGpt5) {
+        // Remove max_tokens if present
+        delete requestPayload.max_tokens;
+        // Add max_completion_tokens in its place
+        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+
+      } else if (isO1OrO3) {
         // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
         // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
         // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }

+
       // add max_tokens to vision model
-      if (visionModel && !isO1OrO3) {
+      if (visionModel && !isO1OrO3 && !isGpt5) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
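Taken together, the hunks above change how a `gpt-5*` request is assembled. A minimal, self-contained sketch of that branching, using made-up stand-ins for `modelConfig` and `visionModel` (values are illustrative, not taken from the diff):

```typescript
// Sketch of the decision flow added above, with simplified inputs for illustration.
const modelConfig = { model: "gpt-5", temperature: 0.7, max_tokens: 4000 };
const visionModel = true;

const isO1OrO3 =
  modelConfig.model.startsWith("o1") ||
  modelConfig.model.startsWith("o3") ||
  modelConfig.model.startsWith("o4-mini");
const isGpt5 = modelConfig.model.startsWith("gpt-5");

const requestPayload: Record<string, unknown> = {
  model: modelConfig.model,
  // temperature is forced to 1 for gpt-5, as it already was for o1/o3
  temperature: !isO1OrO3 && !isGpt5 ? modelConfig.temperature : 1,
};

if (isGpt5) {
  // gpt-5 takes max_completion_tokens instead of max_tokens
  delete requestPayload["max_tokens"];
  requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}

// vision models get an explicit max_tokens, except for o1/o3 and now gpt-5
if (visionModel && !isO1OrO3 && !isGpt5) {
  requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}

console.log(requestPayload);
// => { model: "gpt-5", temperature: 1, max_completion_tokens: 4000 }
```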
@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -517,6 +518,11 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
+  "gpt-5-chat",
+  "gpt-5-mini",
+  "gpt-5-nano",
+  "gpt-5",
+  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
@@ -76,6 +76,7 @@ export function collectModelTable(
   // server custom models
   customModels
     .split(",")
+    .map((v) => v.trim())
     .filter((v) => !!v && v.length > 0)
     .forEach((m) => {
       const available = !m.startsWith("-");
@@ -88,6 +89,13 @@ export function collectModelTable(
         Object.values(modelTable).forEach(
           (model) => (model.available = available),
         );
+      } else if (name.startsWith("*")) {
+        const modelId = name.substring(1).toLowerCase();
+        Object.values(modelTable).forEach((model) => {
+          if (model?.provider?.id === modelId) {
+            model.available = available;
+          }
+        });
       } else {
         // 1. find model by name, and set available value
         const [customModelName, customProviderName] = getModelProvider(name);
test/model.test.ts (new file, 142 lines)
@@ -0,0 +1,142 @@
import { collectModelTable } from "@/app/utils/model";
import { LLMModel, LLMModelProvider } from "@/app/client/api";

describe('collectModelTable', () => {
  const mockModels: readonly LLMModel[] = [
    {
      name: 'gpt-3.5-turbo',
      available: true,
      provider: {
        id: 'openai',
        providerName: 'OpenAI',
        providerType: 'openai',
      } as LLMModelProvider,
      sorted: 1,
    },
    {
      name: 'gpt-4',
      available: true,
      provider: {
        id: 'openai',
        providerName: 'OpenAI',
        providerType: 'openai',
      } as LLMModelProvider,
      sorted: 1,
    },
    {
      name: 'gpt-3.5-turbo',
      available: true,
      provider: {
        id: 'azure',
        providerName: 'Azure',
        providerType: 'azure',
      } as LLMModelProvider,
      sorted: 2,
    },
    {
      name: 'gpt-4',
      available: true,
      provider: {
        id: 'azure',
        providerName: 'Azure',
        providerType: 'azure',
      } as LLMModelProvider,
      sorted: 2,
    },
    {
      name: 'gemini-pro',
      available: true,
      provider: {
        id: 'google',
        providerName: 'Google',
        providerType: 'google',
      } as LLMModelProvider,
      sorted: 3,
    },
    {
      name: 'claude-3-haiku-20240307',
      available: true,
      provider: {
        id: 'anthropic',
        providerName: 'Anthropic',
        providerType: 'anthropic',
      } as LLMModelProvider,
      sorted: 4,
    },
    {
      name: 'grok-beta',
      available: true,
      provider: {
        id: 'xai',
        providerName: 'XAI',
        providerType: 'xai',
      } as LLMModelProvider,
      sorted: 11,
    },
  ];

  test('all models should be available', () => {
    const customModels = '';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
    expect(result['gpt-4@azure'].available).toBe(true);
    expect(result['gemini-pro@google'].available).toBe(true);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(true);
  });

  test('should exclude all models when custom is "-all"', () => {
    const customModels = '-all';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(false);
    expect(result['gpt-4@openai'].available).toBe(false);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
    expect(result['grok-beta@xai'].available).toBe(false);
  });

  test('should exclude all Azure models when custom is "-*azure"', () => {
    const customModels = '-*azure';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(true);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(true);
  });

  test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => {
    const customModels = '-*google,-*xai';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
    expect(result['gpt-4@azure'].available).toBe(true);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
    expect(result['grok-beta@xai'].available).toBe(false);
  });

  test('All models except OpenAI should be excluded, and additional models should be added, when customized as "-all,+*openai,gpt-4o@azure"', () => {
    const customModels = '-all,+*openai,gpt-4o@azure';
    const result = collectModelTable(mockModels, customModels);

    expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
    expect(result['gpt-4@openai'].available).toBe(true);
    expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
    expect(result['gpt-4@azure'].available).toBe(false);
    expect(result['gemini-pro@google'].available).toBe(false);
    expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
    expect(result['grok-beta@xai'].available).toBe(false);
    expect(result['gpt-4o@azure'].available).toBe(true);
  });
});