From 0534b21acde93bed11475b92731f5c8da8f3bd4e Mon Sep 17 00:00:00 2001 From: richardclim <61879224+richardclim@users.noreply.github.com> Date: Wed, 19 Feb 2025 03:45:18 -0500 Subject: [PATCH] prevent include_reasoning from propagating to non-OpenRouter endpoints --- .gitignore | 1 + app/client/platforms/openai.ts | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b1c2bfefa..45219b5a0 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,4 @@ masks.json # mcp config app/mcp/mcp_config.json +.aider* diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 15313c807..67b366b0f 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -234,7 +234,7 @@ export class ChatGPTApi implements LLMApi { presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0, frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0, top_p: !isO1OrO3 ? modelConfig.top_p : 1, - include_reasoning: true, + ...(modelConfig.providerName !== ServiceProvider.Azure && {include_reasoning: true}), // max_tokens: Math.max(modelConfig.max_tokens, 1024), // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. };