From 630b3af441d4cd22bce8690f04889b781a5f3b56 Mon Sep 17 00:00:00 2001
From: JiangYingjin
Date: Thu, 22 May 2025 02:49:21 +0800
Subject: [PATCH] Change o1 to the o series
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/client/platforms/openai.ts | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 6d154251e..db40244f4 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -197,7 +197,7 @@ export class ChatGPTApi implements LLMApi {
     const accessStore = useAccessStore.getState();

     const isDalle3 = _isDalle3(options.config.model);
-    const isO1 = options.config.model.startsWith("o1");
+    const isOseries = options.config.model.match(/^o\d/) !== null;
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -222,7 +222,7 @@ export class ChatGPTApi implements LLMApi {
         const content = visionModel
           ? await preProcessImageContent(v.content)
           : getMessageTextContent(v);
-        if (!(isO1 && v.role === "system"))
+        if (!(isOseries && v.role === "system"))
           messages.push({ role: v.role, content });
       }

@@ -231,16 +231,16 @@ export class ChatGPTApi implements LLMApi {
         messages,
         stream: options.config.stream,
         model: modelConfig.model,
-        temperature: !isO1 ? modelConfig.temperature : 1,
-        presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
-        frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
-        top_p: !isO1 ? modelConfig.top_p : 1,
+        temperature: !isOseries ? modelConfig.temperature : 1,
+        presence_penalty: !isOseries ? modelConfig.presence_penalty : 0,
+        frequency_penalty: !isOseries ? modelConfig.frequency_penalty : 0,
+        top_p: !isOseries ? modelConfig.top_p : 1,
         // max_tokens: Math.max(modelConfig.max_tokens, 1024),
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };

       // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
-      if (isO1) {
+      if (isOseries) {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }

@@ -364,7 +364,7 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isOseries ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );

       const res = await fetch(chatPath, chatPayload);
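
A minimal standalone sketch of the new detection logic follows (TypeScript;
the sample model IDs are illustrative assumptions, not taken from the repo's
model list). The regex /^o\d/ matches any model name that starts with "o"
followed immediately by a digit, which is what lets a single check cover o1,
o3, and later o-series models, while leaving names like "gpt-4o" unaffected.

// Sketch: mirrors the patched check in app/client/platforms/openai.ts.
const isOseries = (model: string): boolean => model.match(/^o\d/) !== null;

// Illustrative model IDs (assumed for the example):
const samples = ["o1", "o1-mini", "o1-preview", "o3-mini", "gpt-4o", "gpt-4o-mini"];
for (const m of samples) {
  console.log(`${m} -> ${isOseries(m)}`);
}
// The first four print true; the gpt-4o variants print false, because the
// digit must immediately follow a leading "o" for the regex to match.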