diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index f4853c689..cd03b4f62 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -103,13 +103,13 @@ export class ChatGPTApi implements LLMApi { "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", ]; // Check if the current model is in the list of models to replace - const finalModel = modelsToReplace.includes(modelConfig.model) ? "gpt-4-1106-preview" : modelConfig.model; + const finalModel = modelsToReplace.includes(modelConfig.model) ? "gpt-4-turbo-preview" : modelConfig.model; const requestPayload = { messages, diff --git a/app/constant.ts b/app/constant.ts index 618a0434f..a3fc870be 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -92,7 +92,7 @@ export const SUMMARIZE_MODEL = "gpt-3.5-turbo-1106"; export const KnowledgeCutOffDate: Record<string, string> = { default: "2021-09", - "gpt-4-1106-preview": "2023-04", + "gpt-4-turbo-preview": "2023-04", "gpt-4-vision-preview": "2023-04", }; @@ -122,7 +122,7 @@ export const DEFAULT_MODELS = [ available: true, }, { - name: "gpt-4-1106-preview", + name: "gpt-4-turbo-preview", available: true, }, { diff --git a/app/masks/en.ts b/app/masks/en.ts index f1a57c8bd..47ce1810a 100644 --- a/app/masks/en.ts +++ b/app/masks/en.ts @@ -20,7 +20,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.4, "max_tokens":4000, "presence_penalty":0, @@ -60,7 +60,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.4, "max_tokens":4000, "presence_penalty":0, @@ -102,7 +102,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.4, 
"max_tokens":4000, "presence_penalty":0, @@ -132,7 +132,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.5, "top_p":1, "max_tokens":4000, @@ -162,7 +162,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.5, "top_p":1, "max_tokens":4000, @@ -192,7 +192,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.5, "top_p":1, "max_tokens":10000, @@ -227,7 +227,7 @@ export const EN_MASKS: BuiltinMask[] = [ ], "syncGlobalConfig":false, "modelConfig":{ - "model":"gpt-4-1106-preview", + "model":"gpt-4-turbo-preview", "temperature":0.5, "top_p":1, "max_tokens":4001, diff --git a/app/store/config.ts b/app/store/config.ts index 8d27d099f..ed8941ccf 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -46,15 +46,15 @@ export const DEFAULT_CONFIG = { models: DEFAULT_MODELS as any as LLMModel[], modelConfig: { - model: "gpt-4-1106-preview" as ModelType, + model: "gpt-4-turbo-preview" as ModelType, temperature: 0.5, top_p: 1, max_tokens: 4000, presence_penalty: 0, frequency_penalty: 0, sendMemory: true, historyMessageCount: 4, compressMessageLengthThreshold: 1000, enableInjectSystemPrompts: true, template: DEFAULT_INPUT_TEMPLATE, },