From cd0366392ae20920ec6590e700d34d57b4a46dc0 Mon Sep 17 00:00:00 2001
From: AC
Date: Wed, 11 Jun 2025 15:25:38 +0800
Subject: [PATCH] change max tokens

---
 app/api/bedrock/index.ts        | 2 +-
 app/client/platforms/bedrock.ts | 2 +-
 app/client/platforms/openai.ts  | 2 +-
 app/store/chat.ts               | 2 +-
 app/store/config.ts             | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/app/api/bedrock/index.ts b/app/api/bedrock/index.ts
index a35cfc55a..112fa9c38 100644
--- a/app/api/bedrock/index.ts
+++ b/app/api/bedrock/index.ts
@@ -374,7 +374,7 @@ export async function handle(
   const payload = {
     anthropic_version: "bedrock-2023-05-31",
     max_tokens:
-      typeof max_tokens === "number" && max_tokens > 0 ? max_tokens : 4096,
+      typeof max_tokens === "number" && max_tokens > 0 ? max_tokens : 8000,
     temperature:
       typeof temperature === "number" && temperature >= 0 && temperature <= 1
         ? temperature
diff --git a/app/client/platforms/bedrock.ts b/app/client/platforms/bedrock.ts
index 2e0df43b8..b5f12c542 100644
--- a/app/client/platforms/bedrock.ts
+++ b/app/client/platforms/bedrock.ts
@@ -31,7 +31,7 @@ export class BedrockApi implements LLMApi {
         messages,
         temperature: modelConfig.temperature,
         stream: !!modelConfig.stream,
-        max_tokens: (modelConfig as any).max_tokens || 4096, // Cast to access max_tokens from ModelConfig
+        max_tokens: (modelConfig as any).max_tokens || 8000, // Cast to access max_tokens from ModelConfig
       }),
       signal: controller.signal,
       headers: getHeaders(), // getHeaders should handle Bedrock (no auth needed)
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index c6f3fc425..350ea272c 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -244,7 +244,7 @@ export class ChatGPTApi implements LLMApi {

       // add max_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 8000);
       }
     }

diff --git a/app/store/chat.ts b/app/store/chat.ts
index 6c98923da..42c32543f 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -867,7 +867,7 @@ export const useChatStore = createPersistStore(

       const historyMsgLength = countMessages(toBeSummarizedMsgs);

-      if (historyMsgLength > (modelConfig?.max_tokens || 4000)) {
+      if (historyMsgLength > (modelConfig?.max_tokens || 8000)) {
         const n = toBeSummarizedMsgs.length;
         toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
           Math.max(0, n - modelConfig.historyMessageCount),
diff --git a/app/store/config.ts b/app/store/config.ts
index 45e21b026..eefb39f41 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -68,7 +68,7 @@ export const DEFAULT_CONFIG = {
     providerName: "OpenAI" as ServiceProvider,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 4000,
+    max_tokens: 8000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,