Compare commits

10 Commits

Author SHA1 Message Date
JU-NINE NGU CHO
f0003b47d5 Merge d5c86ce9ab into c3b8c1587c 2025-10-04 19:48:02 +01:00
RiverRay
c3b8c1587c Merge pull request #6637 from princeaden1/feat-xai-new-models
feat: new models for xAI (#6559)
2025-09-29 19:37:17 +08:00
Adekunle
12ec11ed8a feat: new models for xAI (#6559) 2025-09-20 00:09:59 +01:00
RiverRay
995bef73de Merge pull request #6599 from DreamRivulet/add-support-GPT5
add: model gpt-5
2025-08-10 17:21:12 +08:00
Sam
38ac502d80 Add support for GPT5 2025-08-09 17:03:49 +08:00
Sam
0511808900 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
42eff644b4 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
8ae6883784 add gpt-5 2025-08-09 17:03:49 +08:00
Sam
c0f2ab6de3 add gpt-5 2025-08-09 17:03:06 +08:00
chojuninengu
d5c86ce9ab Enhance Webpack configuration by adding fallbacks for Node.js core modules and integrating ProvidePlugin for Buffer and process support. 2025-04-29 10:56:55 +01:00
3 changed files with 38 additions and 3 deletions

View File

@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
options.config.model.startsWith("o1") || options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3") || options.config.model.startsWith("o3") ||
options.config.model.startsWith("o4-mini"); options.config.model.startsWith("o4-mini");
const isGpt5 = options.config.model.startsWith("gpt-5");
if (isDalle3) { if (isDalle3) {
const prompt = getMessageTextContent( const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any, options.messages.slice(-1)?.pop() as any,
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+      temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
       presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
       frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
       top_p: !isO1OrO3 ? modelConfig.top_p : 1,
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };

-    if (isO1OrO3) {
+    if (isGpt5) {
+      // Remove max_tokens if present
+      delete requestPayload.max_tokens;
+      // Use max_completion_tokens instead
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+    } else if (isO1OrO3) {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
requestPayload["max_completion_tokens"] = modelConfig.max_tokens; requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
} }
// add max_tokens to vision model // add max_tokens to vision model
if (visionModel && !isO1OrO3) { if (visionModel && !isO1OrO3 && ! isGpt5) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
} }
} }
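
Net effect of this file's changes: GPT-5 is gated like the o1/o3/o4-mini reasoning models on the request side. Temperature is pinned to 1 for GPT-5 as well, max_tokens is dropped in favour of max_completion_tokens, and the vision-model max_tokens top-up is skipped. A minimal self-contained sketch of the resulting payload logic; buildChatPayload and the pared-down ModelConfig are illustrative names, not identifiers from the diff:

    // Illustrative sketch only; buildChatPayload and this pared-down
    // ModelConfig are hypothetical names, not part of the diff.
    interface ModelConfig {
      model: string;
      temperature: number;
      presence_penalty: number;
      frequency_penalty: number;
      top_p: number;
      max_tokens: number;
    }

    function buildChatPayload(modelConfig: ModelConfig, messages: unknown[]) {
      const isO1OrO3 =
        modelConfig.model.startsWith("o1") ||
        modelConfig.model.startsWith("o3") ||
        modelConfig.model.startsWith("o4-mini");
      const isGpt5 = modelConfig.model.startsWith("gpt-5");

      const payload: Record<string, unknown> = {
        messages,
        model: modelConfig.model,
        // reasoning-style models only accept the default sampling settings
        temperature: !isO1OrO3 && !isGpt5 ? modelConfig.temperature : 1,
        presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
        frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
        top_p: !isO1OrO3 ? modelConfig.top_p : 1,
      };

      if (isGpt5 || isO1OrO3) {
        // these models take max_completion_tokens rather than max_tokens
        payload["max_completion_tokens"] = modelConfig.max_tokens;
      }
      return payload;
    }

Keeping the model-family checks in one place like this makes the next reasoning-style family cheap to add: one startsWith probe and one branch.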

View File

@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -517,6 +518,11 @@ const openaiModels = [
"gpt-4.1-nano-2025-04-14", "gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview", "gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27", "gpt-4.5-preview-2025-02-27",
"gpt-5-chat",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5",
"gpt-5-chat-2025-01-01-preview",
"gpt-4o", "gpt-4o",
"gpt-4o-2024-05-13", "gpt-4o-2024-05-13",
"gpt-4o-2024-08-06", "gpt-4o-2024-08-06",
@@ -667,6 +673,11 @@ const xAIModes = [
"grok-3-beta", "grok-3-beta",
"grok-3", "grok-3",
"grok-3-latest", "grok-3-latest",
"grok-4",
"grok-4-0709",
"grok-4-fast-non-reasoning",
"grok-4-fast-reasoning",
"grok-code-fast-1",
]; ];
const chatglmModels = [ const chatglmModels = [
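
With /gpt-5/ in VISION_MODEL_REGEXES, every gpt-5* model id is treated as vision-capable unless it also matches the exclude list. The diff does not show the consumer of these arrays, so the isVisionModel helper below is an assumed sketch of the usual match-then-exclude pattern, with the lists trimmed to the entries visible in the hunks:

    // Assumed consumer of the two regex lists; not shown in this diff.
    const VISION_MODEL_REGEXES = [/o3/, /o4-mini/, /grok-4/i, /gpt-5/];
    const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

    function isVisionModel(model: string): boolean {
      // exclusions win over matches
      return (
        !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
        VISION_MODEL_REGEXES.some((re) => re.test(model))
      );
    }

    // isVisionModel("gpt-5-nano") -> true  (matches /gpt-5/)
    // isVisionModel("grok-3")     -> false (no regex matches)

Note that the substring regexes are deliberately loose: /gpt-5/ also matches dated or suffixed ids such as gpt-5-chat-2025-01-01-preview.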

View File

@@ -21,9 +21,25 @@ const nextConfig = {
     }

     config.resolve.fallback = {
+      ...config.resolve.fallback,
       child_process: false,
+      process: false,
+      path: false,
+      fs: false,
+      os: false,
+      util: false,
+      stream: false,
+      buffer: require.resolve('buffer/'),
+      events: require.resolve('events/'),
     };
+
+    config.plugins.push(
+      new webpack.ProvidePlugin({
+        Buffer: ['buffer', 'Buffer'],
+        process: 'process/browser',
+      }),
+    );

     return config;
   },

   output: mode,
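
This webpack change targets browser bundles that transitively import Node-flavoured packages: core modules with no browser equivalent (fs, os, child_process, and so on) are stubbed to empty modules via the false fallbacks, buffer and events resolve to their npm polyfills (which must therefore be installed as dependencies), and ProvidePlugin injects Buffer and process as globals wherever they appear as free identifiers. One assumption worth checking: webpack itself has to be in scope inside the config file, either imported or taken from the options object Next.js passes to the webpack callback. Once the plugin is active, bundled code can reference the globals without importing them; a hypothetical consumer:

    // Hypothetical browser-side module; no explicit Buffer/process imports.
    // webpack's ProvidePlugin rewrites these free references to the polyfills.
    const encoded = Buffer.from("hello").toString("base64"); // "aGVsbG8="
    const isProd = process.env.NODE_ENV === "production";
    console.log(encoded, isProd);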