diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 5a110b84b..c35ad1040 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -196,6 +196,7 @@ export class ChatGPTApi implements LLMApi {
 
     const isDalle3 = _isDalle3(options.config.model);
     const isO1 = options.config.model.startsWith("o1");
+    const isO3 = options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -217,7 +218,7 @@
       const content = visionModel
         ? await preProcessImageContent(v.content)
         : getMessageTextContent(v);
-      if (!(isO1 && v.role === "system"))
+      if (!((isO1 || isO3) && v.role === "system"))
         messages.push({ role: v.role, content });
     }
 
@@ -226,16 +227,16 @@
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1 ? modelConfig.temperature : 1,
-      presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
-      frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
-      top_p: !isO1 ? modelConfig.top_p : 1,
+      temperature: !(isO1 || isO3) ? modelConfig.temperature : 1,
+      presence_penalty: !(isO1 || isO3) ? modelConfig.presence_penalty : 0,
+      frequency_penalty: !(isO1 || isO3) ? modelConfig.frequency_penalty : 0,
+      top_p: !(isO1 || isO3) ? modelConfig.top_p : 1,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };
 
-    // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
-    if (isO1) {
+    // O1/O3 use max_completion_tokens to control the number of generated tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+    if (isO1 || isO3) {
      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
    }
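
For context, here is a minimal standalone sketch of the behavior this patch extends from o1 to o3 models: `system` messages are dropped, the sampling parameters are pinned, and `max_completion_tokens` is sent instead of `max_tokens`. The `ModelConfig`, `ChatMessage`, and `buildRequestPayload` names below are illustrative only, not the repository's actual types or helpers.

```ts
// Illustrative sketch; types and function names are hypothetical,
// not the ones used in app/client/platforms/openai.ts.
interface ModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

function buildRequestPayload(
  messages: ChatMessage[],
  cfg: ModelConfig,
  stream: boolean,
) {
  // Treat both o1 and o3 model families as reasoning models.
  const isReasoning = cfg.model.startsWith("o1") || cfg.model.startsWith("o3");

  // Reasoning models do not accept system messages, so drop them.
  const filtered = isReasoning
    ? messages.filter((m) => m.role !== "system")
    : messages;

  const payload: Record<string, unknown> = {
    messages: filtered,
    stream,
    model: cfg.model,
    // Sampling parameters are fixed for reasoning models.
    temperature: isReasoning ? 1 : cfg.temperature,
    presence_penalty: isReasoning ? 0 : cfg.presence_penalty,
    frequency_penalty: isReasoning ? 0 : cfg.frequency_penalty,
    top_p: isReasoning ? 1 : cfg.top_p,
  };

  if (isReasoning) {
    // Reasoning models take max_completion_tokens instead of max_tokens.
    payload["max_completion_tokens"] = cfg.max_tokens;
  }

  return payload;
}

// Example: an o3 request drops the system prompt and pins sampling params.
console.log(
  buildRequestPayload(
    [
      { role: "system", content: "You are helpful." },
      { role: "user", content: "Hello" },
    ],
    {
      model: "o3-mini",
      temperature: 0.7,
      presence_penalty: 0,
      frequency_penalty: 0,
      top_p: 1,
      max_tokens: 4096,
    },
    false,
  ),
);
```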