diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 4cad6bf94..51ae71ea6 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
@@ -238,8 +238,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
     if (isO1OrO3) {
+      // By default, the o1/o3 models will not attempt to produce output that includes markdown formatting.
+      // Manually add a "Formatting re-enabled" developer message to encourage markdown inclusion in model responses.
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });
+
+      // o1/o3 use max_completion_tokens to control the number of output tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
 
diff --git a/app/constant.ts b/app/constant.ts
index 54088a532..beed3e48f 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -633,6 +633,18 @@ const xAIModes = [
   "grok-2-vision-1212",
   "grok-2-vision",
   "grok-2-vision-latest",
+  "grok-3-mini-fast-beta",
+  "grok-3-mini-fast",
+  "grok-3-mini-fast-latest",
+  "grok-3-mini-beta",
+  "grok-3-mini",
+  "grok-3-mini-latest",
+  "grok-3-fast-beta",
+  "grok-3-fast",
+  "grok-3-fast-latest",
+  "grok-3-beta",
+  "grok-3",
+  "grok-3-latest",
 ];
 
 const chatglmModels = [
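
For reviewers, a minimal standalone sketch of the behavior the openai.ts hunk adds: prepending a `developer`-role message for reasoning models and sending `max_completion_tokens` instead of `max_tokens`. The `buildReasoningPayload` helper and its trimmed-down types below are hypothetical simplifications of the `RequestPayload` shape shown in the diff, not code from this repository.

```ts
// Hypothetical sketch (not repository code) of the o1/o3 request shaping above.
interface Message {
  role: "developer" | "system" | "user" | "assistant";
  content: string;
}

interface ReasoningPayload {
  model: string;
  messages: Message[];
  max_completion_tokens?: number;
}

function buildReasoningPayload(
  model: string,
  messages: Message[],
  maxTokens: number,
): ReasoningPayload {
  const payload: ReasoningPayload = { model, messages: [...messages] };
  // Mirrors the isO1OrO3 branch in openai.ts: reasoning models get special handling.
  if (model.startsWith("o1") || model.startsWith("o3")) {
    // Reasoning models omit markdown by default; a "Formatting re-enabled"
    // developer message at the front of the conversation opts back in.
    payload.messages.unshift({ role: "developer", content: "Formatting re-enabled" });
    // Reasoning models take max_completion_tokens rather than max_tokens.
    payload.max_completion_tokens = maxTokens;
  }
  return payload;
}
```

Under these assumptions, `buildReasoningPayload("o3-mini", history, 4000)` would yield a `messages` array whose first element is the developer message, matching the `unshift` call in the diff, while non-reasoning models pass through unchanged.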