Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-11 03:33:46 +08:00)

Comparing v2.16.1...f0003b47d5 (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | f0003b47d5 | |
| | c3b8c1587c | |
| | 12ec11ed8a | |
| | 995bef73de | |
| | 38ac502d80 | |
| | 0511808900 | |
| | 42eff644b4 | |
| | 8ae6883784 | |
| | c0f2ab6de3 | |
| | d5c86ce9ab | |
```diff
@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
+    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
```
```diff
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+      temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
       presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
       frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
       top_p: !isO1OrO3 ? modelConfig.top_p : 1,
```
```diff
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    if (isO1OrO3) {
+    if (isGpt5) {
+      // Remove max_tokens if present
+      delete requestPayload.max_tokens;
+      // Send the limit as max_completion_tokens instead
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+
+    } else if (isO1OrO3) {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
```
```diff
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
 
+
     // add max_tokens to vision model
-    if (visionModel && !isO1OrO3) {
+    if (visionModel && !isO1OrO3 && !isGpt5) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
   }
```
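Taken together, the hunks above route gpt-5 through the same parameter handling as the o-series reasoning models: the sampling knobs are pinned to their defaults and the token limit moves from `max_tokens` to `max_completion_tokens`. A minimal TypeScript sketch of the resulting payload logic, assuming a simplified `ModelConfig` shape (the standalone `buildPayload` helper is illustrative, not part of the codebase):

```ts
// Illustrative sketch only: mirrors how the patched ChatGPTApi shapes
// an OpenAI chat payload for reasoning-style models (o1/o3/o4-mini, gpt-5).
interface ModelConfig {
  model: string;
  temperature: number;
  max_tokens: number;
}

function buildPayload(cfg: ModelConfig): Record<string, unknown> {
  const isO1OrO3 =
    cfg.model.startsWith("o1") ||
    cfg.model.startsWith("o3") ||
    cfg.model.startsWith("o4-mini");
  const isGpt5 = cfg.model.startsWith("gpt-5");

  const payload: Record<string, unknown> = {
    model: cfg.model,
    // gpt-5 and the o-series only accept the default temperature of 1
    temperature: !isO1OrO3 && !isGpt5 ? cfg.temperature : 1,
  };

  if (isGpt5 || isO1OrO3) {
    // these models reject max_tokens; the limit is sent as max_completion_tokens
    payload["max_completion_tokens"] = cfg.max_tokens;
  } else {
    payload["max_tokens"] = cfg.max_tokens;
  }
  return payload;
}

console.log(buildPayload({ model: "gpt-5-mini", temperature: 0.5, max_tokens: 4000 }));
// -> { model: "gpt-5-mini", temperature: 1, max_completion_tokens: 4000 }
```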
```diff
@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
```
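These regex lists gate whether a model is treated as vision-capable, so adding `/gpt-5/` enables image input for the whole gpt-5 family. A sketch of how such include/exclude lists are typically consumed (the `isVisionModel` helper below is an assumption for illustration, not a quote from the codebase):

```ts
// Sketch, assuming the app tests the model name against both lists:
// a model counts as vision-capable if it matches an include pattern
// and no exclude pattern.
const VISION_MODEL_REGEXES = [/o3/, /o4-mini/, /grok-4/i, /gpt-5/];
const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

function isVisionModel(model: string): boolean {
  const excluded = EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model));
  return !excluded && VISION_MODEL_REGEXES.some((re) => re.test(model));
}

console.log(isVisionModel("gpt-5-nano")); // true after this patch
console.log(isVisionModel("claude-3-5-haiku-20241022")); // excluded -> false
```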
```diff
@@ -517,6 +518,11 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
+  "gpt-5-chat",
+  "gpt-5-mini",
+  "gpt-5-nano",
+  "gpt-5",
+  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
```
```diff
@@ -667,6 +673,11 @@ const xAIModes = [
   "grok-3-beta",
   "grok-3",
   "grok-3-latest",
+  "grok-4",
+  "grok-4-0709",
+  "grok-4-fast-non-reasoning",
+  "grok-4-fast-reasoning",
+  "grok-code-fast-1",
 ];
 
 const chatglmModels = [
```
```diff
@@ -21,9 +21,25 @@ const nextConfig = {
     }
 
     config.resolve.fallback = {
+      ...config.resolve.fallback,
       child_process: false,
+      process: false,
+      path: false,
+      fs: false,
+      os: false,
+      util: false,
+      stream: false,
+      buffer: require.resolve('buffer/'),
+      events: require.resolve('events/'),
     };
 
+    config.plugins.push(
+      new webpack.ProvidePlugin({
+        Buffer: ['buffer', 'Buffer'],
+        process: 'process/browser',
+      }),
+    );
+
     return config;
   },
   output: mode,
```
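The `false` fallbacks tell webpack to stub those Node built-ins out of the client bundle, while `require.resolve` wires `buffer` and `events` to their npm polyfill packages and `ProvidePlugin` injects `Buffer`/`process` wherever code references them as bare globals. A sketch of browser-side code that only bundles cleanly under this config (the module itself is hypothetical):

```ts
// Hypothetical client module: with the ProvidePlugin mapping above, webpack
// rewrites the bare `Buffer` and `process` globals to the 'buffer' and
// 'process/browser' polyfill packages at bundle time.
export function encodeBase64(text: string): string {
  return Buffer.from(text, "utf-8").toString("base64");
}

export function isProduction(): boolean {
  return process.env.NODE_ENV === "production";
}
```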