Compare commits

...

14 Commits

Author SHA1 Message Date
Q.A.zh
0487542c38
Merge 69fcb92a3b into 995bef73de 2025-08-11 16:16:00 +08:00
RiverRay
995bef73de
Merge pull request #6599 from DreamRivulet/add-support-GPT5
add: model gpt-5
2025-08-10 17:21:12 +08:00
Sam
38ac502d80 Add support for GPT5 2025-08-09 17:03:49 +08:00
Sam
0511808900 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
42eff644b4 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
8ae6883784 add gpt-5 2025-08-09 17:03:49 +08:00
Sam
c0f2ab6de3 add gpt-5 2025-08-09 17:03:06 +08:00
Q.A.zh
69fcb92a3b
Remove spaces, improve custom_models fault tolerance 2024-12-30 02:54:15 +00:00
Q.A.zh
3b5b496599
modify test comment 2024-12-28 15:53:35 +00:00
Q.A.zh
2db4caace4
fix model name 2024-12-28 15:43:27 +00:00
Q.A.zh
fd8ad63655
fix error model name 2024-12-28 15:38:01 +00:00
Q.A.zh
c41c2b538a
Remove the empty array slot 2024-12-28 09:14:27 +00:00
Q.A.zh
4d6c82deb9
add collectModelTable unit test 2024-12-28 09:09:41 +00:00
Q.A.zh
b09f458aeb Introducing the ability to remove specified Providers. 2024-12-28 04:17:22 +00:00
5 changed files with 171 additions and 4 deletions

View File

@@ -313,12 +313,15 @@ If you want to disable parse settings from url, set this to 1.
To control custom models, use `+` to add a custom model, use `-` to hide a model, and use `name=displayName` to customize a model's display name, separated by commas.
User `-all` to disable all default models, `+all` to enable all default models.
Use `-all` to disable all default models, `+all` to enable all default models.
Use `-*provider` to disable all models from the specified provider.
Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come.
For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
> Example: `+gpt-3.5-turbo@Azure=gpt35` will show the option `gpt35(Azure)` in the model list.
> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
> If you don't want to use Azure models, adding `-*azure` will prevent them from appearing in the model list.
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
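
Putting the rules together, an illustrative sketch (`CUSTOM_MODELS` is the server variable this section configures; the model names here are placeholders):

// Illustrative CUSTOM_MODELS value combining the rules above:
// -all hides every default model, +gpt-4o re-enables gpt-4o,
// gpt-4o-mini=mini renames gpt-4o-mini in the UI, and
// -*azure hides every Azure-provided model.
const CUSTOM_MODELS = "-all,+gpt-4o,gpt-4o-mini=mini,-*azure";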

View File

@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3") ||
options.config.model.startsWith("o4-mini");
const isGpt5 = options.config.model.startsWith("gpt-5");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -230,7 +231,7 @@
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
@@ -238,7 +239,13 @@
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I don't want to explain anymore.
};
if (isO1OrO3) {
if (isGpt5) {
// Remove max_tokens if present
delete requestPayload.max_tokens;
// Use max_completion_tokens instead
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
} else if (isO1OrO3) {
// by default the o1/o3 models will not attempt to produce output that includes markdown formatting
// manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
// (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -251,8 +258,9 @@
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}
// add max_tokens to vision model
if (visionModel && !isO1OrO3) {
if (visionModel && !isO1OrO3 && !isGpt5) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
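
In short, gpt-5 joins the o1/o3 family in taking `max_completion_tokens` instead of `max_tokens`. A minimal sketch of just this branching, with the payload type simplified for illustration (the field names come from the diff above; everything else is assumed):

// Simplified payload type for illustration only.
interface TokenFields {
  max_tokens?: number;
  max_completion_tokens?: number;
}

// Mirrors the branch above: gpt-5 and the o-series models get
// max_completion_tokens; other chat models keep max_tokens untouched.
function applyTokenLimit(payload: TokenFields, model: string, maxTokens: number): void {
  const isGpt5 = model.startsWith("gpt-5");
  const isO1OrO3 =
    model.startsWith("o1") || model.startsWith("o3") || model.startsWith("o4-mini");
  if (isGpt5 || isO1OrO3) {
    delete payload.max_tokens;
    payload.max_completion_tokens = maxTokens;
  }
}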

View File

@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
/o3/,
/o4-mini/,
/grok-4/i,
/gpt-5/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
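
The new `/gpt-5/` entry marks gpt-5 models as vision-capable. A sketch of how these two lists are presumably combined (the helper name `isVisionModel` is an assumption; only the include/exclude logic is shown):

// Assumed consumer of the two regex lists: a model is a vision model when
// it matches an include pattern and no exclude pattern.
function isVisionModel(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}
// e.g. isVisionModel("gpt-5-mini") === true after this change.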
@@ -517,6 +518,11 @@ const openaiModels = [
"gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-5-chat",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5",
"gpt-5-chat-2025-01-01-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",

View File

@@ -76,6 +76,7 @@ export function collectModelTable(
// server custom models
customModels
.split(",")
.map((v) => v.trim())
.filter((v) => !!v && v.length > 0)
.forEach((m) => {
const available = !m.startsWith("-");
@@ -88,6 +89,13 @@
Object.values(modelTable).forEach(
(model) => (model.available = available),
);
} else if (name.startsWith("*")) {
const modelId = name.substring(1).toLowerCase();
Object.values(modelTable).forEach((model) => {
if (model?.provider?.id === modelId) {
model.available = available;
}
});
} else {
// 1. find model by name, and set available value
const [customModelName, customProviderName] = getModelProvider(name);
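
The new branch treats a leading `*` as a provider wildcard: `-*azure` flips `available` to false for every model whose provider id is `azure`. An isolated sketch of just this rule (types reduced for illustration; the full function also handles `all` and per-model rules, and the tests below exercise it end to end):

// Reduced model-table entry for illustration.
type Entry = { available: boolean; provider?: { id: string } };

// Applies a single "+*provider" / "-*provider" rule to the table.
function applyProviderWildcard(table: Record<string, Entry>, rule: string): void {
  const available = !rule.startsWith("-");
  const name = rule.startsWith("+") || rule.startsWith("-") ? rule.slice(1) : rule;
  if (!name.startsWith("*")) return;
  const providerId = name.substring(1).toLowerCase();
  for (const entry of Object.values(table)) {
    if (entry.provider?.id === providerId) entry.available = available;
  }
}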

test/model.test.ts Normal file (142 lines added)
View File

@@ -0,0 +1,142 @@
import { collectModelTable } from "@/app/utils/model";
import { LLMModel, LLMModelProvider } from "@/app/client/api";
describe('collectModelTable', () => {
const mockModels: readonly LLMModel[] = [
{
name: 'gpt-3.5-turbo',
available: true,
provider: {
id: 'openai',
providerName: 'OpenAI',
providerType: 'openai',
} as LLMModelProvider,
sorted: 1,
},
{
name: 'gpt-4',
available: true,
provider: {
id: 'openai',
providerName: 'OpenAI',
providerType: 'openai',
} as LLMModelProvider,
sorted: 1,
},
{
name: 'gpt-3.5-turbo',
available: true,
provider: {
id: 'azure',
providerName: 'Azure',
providerType: 'azure',
} as LLMModelProvider,
sorted: 2,
},
{
name: 'gpt-4',
available: true,
provider: {
id: 'azure',
providerName: 'Azure',
providerType: 'azure',
} as LLMModelProvider,
sorted: 2,
},
{
name: 'gemini-pro',
available: true,
provider: {
id: 'google',
providerName: 'Google',
providerType: 'google',
} as LLMModelProvider,
sorted: 3,
},
{
name: 'claude-3-haiku-20240307',
available: true,
provider: {
id: 'anthropic',
providerName: 'Anthropic',
providerType: 'anthropic',
} as LLMModelProvider,
sorted: 4,
},
{
name: 'grok-beta',
available: true,
provider: {
id: 'xai',
providerName: 'XAI',
providerType: 'xai',
} as LLMModelProvider,
sorted: 11,
},
];
test('all models should be available', () => {
const customModels = '';
const result = collectModelTable(mockModels, customModels);
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
expect(result['gpt-4@openai'].available).toBe(true);
expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
expect(result['gpt-4@azure'].available).toBe(true);
expect(result['gemini-pro@google'].available).toBe(true);
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
expect(result['grok-beta@xai'].available).toBe(true);
});
test('should exclude all models when custom is "-all"', () => {
const customModels = '-all';
const result = collectModelTable(mockModels, customModels);
expect(result['gpt-3.5-turbo@openai'].available).toBe(false);
expect(result['gpt-4@openai'].available).toBe(false);
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
expect(result['gpt-4@azure'].available).toBe(false);
expect(result['gemini-pro@google'].available).toBe(false);
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
expect(result['grok-beta@xai'].available).toBe(false);
});
test('should exclude all Azure models when custom is "-*azure"', () => {
const customModels = '-*azure';
const result = collectModelTable(mockModels, customModels);
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
expect(result['gpt-4@openai'].available).toBe(true);
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
expect(result['gpt-4@azure'].available).toBe(false);
expect(result['gemini-pro@google'].available).toBe(true);
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
expect(result['grok-beta@xai'].available).toBe(true);
});
test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => {
const customModels = '-*google,-*xai';
const result = collectModelTable(mockModels, customModels);
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
expect(result['gpt-4@openai'].available).toBe(true);
expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
expect(result['gpt-4@azure'].available).toBe(true);
expect(result['gemini-pro@google'].available).toBe(false);
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
expect(result['grok-beta@xai'].available).toBe(false);
});
test('should exclude all models except OpenAI and add extra models when custom is "-all,+*openai,gpt-4o@azure"', () => {
const customModels = '-all,+*openai,gpt-4o@azure';
const result = collectModelTable(mockModels, customModels);
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
expect(result['gpt-4@openai'].available).toBe(true);
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
expect(result['gpt-4@azure'].available).toBe(false);
expect(result['gemini-pro@google'].available).toBe(false);
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
expect(result['grok-beta@xai'].available).toBe(false);
expect(result['gpt-4o@azure'].available).toBe(true);
});
});