Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-08 19:16:37 +08:00)
gpt-4-turbo-preview

commit b0b5e5e9f2
parent bcd01b773b
@@ -103,13 +103,13 @@ export class ChatGPTApi implements LLMApi {
       "gpt-4",
       "gpt-4-0314",
       "gpt-4-0613",
       "gpt-4-32k",
       "gpt-4-turbo-preview",
       "gpt-4-32k-0314",
       "gpt-4-32k-0613",
     ];
 
     // Check if the current model is in the list of models to replace
-    const finalModel = modelsToReplace.includes(modelConfig.model) ? "gpt-4-1106-preview" : modelConfig.model;
+    const finalModel = modelsToReplace.includes(modelConfig.model) ? "gpt-4-turbo-preview" : modelConfig.model;
     const requestPayload = {
       messages,
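Note on the hunk above: any model found in modelsToReplace is swapped for a single target model before the request payload is built, and this commit changes that target from "gpt-4-1106-preview" to "gpt-4-turbo-preview". A standalone sketch of that behavior follows; the resolveModel helper name is illustrative and not part of the diff.

// Sketch of the substitution performed above: models in the replacement
// list are mapped to the new default, everything else passes through.
const modelsToReplace: string[] = [
  "gpt-4",
  "gpt-4-0314",
  "gpt-4-0613",
  "gpt-4-32k",
  "gpt-4-turbo-preview",
  "gpt-4-32k-0314",
  "gpt-4-32k-0613",
];

function resolveModel(requestedModel: string): string {
  return modelsToReplace.includes(requestedModel)
    ? "gpt-4-turbo-preview" // target changed from "gpt-4-1106-preview" in this commit
    : requestedModel;
}

console.log(resolveModel("gpt-4-0613"));    // "gpt-4-turbo-preview"
console.log(resolveModel("gpt-3.5-turbo")); // "gpt-3.5-turbo" (left unchanged)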
@@ -92,7 +92,7 @@ export const SUMMARIZE_MODEL = "gpt-3.5-turbo-1106";
 
 export const KnowledgeCutOffDate: Record<string, string> = {
   default: "2021-09",
-  "gpt-4-1106-preview": "2023-04",
+  "gpt-4-turbo-preview": "2023-04",
   "gpt-4-vision-preview": "2023-04",
 };
 
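This map keys knowledge-cutoff dates by model id, with a "default" entry as the fallback, so the key has to track the model name used elsewhere. A minimal sketch of how such a lookup with fallback works; getKnowledgeCutoff is a hypothetical accessor, not code from the repo.

const KnowledgeCutOffDate: Record<string, string> = {
  default: "2021-09",
  "gpt-4-turbo-preview": "2023-04",
  "gpt-4-vision-preview": "2023-04",
};

// Models without an explicit entry fall back to the default cutoff.
function getKnowledgeCutoff(model: string): string {
  return KnowledgeCutOffDate[model] ?? KnowledgeCutOffDate.default;
}

console.log(getKnowledgeCutoff("gpt-4-turbo-preview")); // "2023-04"
console.log(getKnowledgeCutoff("gpt-3.5-turbo"));       // "2021-09"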
@@ -122,7 +122,7 @@ export const DEFAULT_MODELS = [
     available: true,
   },
   {
-    name: "gpt-4-1106-preview",
+    name: "gpt-4-turbo-preview",
     available: true,
   },
   {
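Entries in DEFAULT_MODELS follow a simple name/available shape, and renaming the entry here is what makes the new model id show up as selectable. A small sketch under that assumption; the entries and the ModelEntry type name are illustrative, and the real list in the repo is longer.

type ModelEntry = { name: string; available: boolean };

const models: ModelEntry[] = [
  { name: "gpt-4-turbo-preview", available: true },
  { name: "gpt-4-32k", available: false }, // illustrative second entry
];

// A typical availability filter: only models flagged available are offered.
const selectable = models.filter((m) => m.available).map((m) => m.name);
console.log(selectable); // ["gpt-4-turbo-preview"]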
@@ -20,7 +20,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.4,
         "max_tokens":4000,
         "presence_penalty":0,
@@ -60,7 +60,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.4,
         "max_tokens":4000,
         "presence_penalty":0,
@@ -102,7 +102,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.4,
         "max_tokens":4000,
         "presence_penalty":0,
@@ -132,7 +132,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.5,
         "top_p":1,
         "max_tokens":4000,
@@ -162,7 +162,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.5,
         "top_p":1,
         "max_tokens":4000,
@@ -192,7 +192,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.5,
         "top_p":1,
         "max_tokens":10000,
@@ -227,7 +227,7 @@ export const EN_MASKS: BuiltinMask[] = [
       ],
       "syncGlobalConfig":false,
       "modelConfig":{
-        "model":"gpt-4-1106-preview",
+        "model":"gpt-4-turbo-preview",
         "temperature":0.5,
         "top_p":1,
         "max_tokens":4001,
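All seven hunks above edit the same field: every built-in mask in EN_MASKS carries its own modelConfig, and only the model id changes, while temperature (0.4 or 0.5), top_p, and max_tokens (4000, 4001, or 10000) remain per-mask. A rough sketch of the shape involved, trimmed to the fields visible in the diff; the real BuiltinMask type in the repo has more fields than shown here.

// Approximate per-mask model settings, limited to what the hunks above show.
interface MaskModelConfig {
  model: string;             // now "gpt-4-turbo-preview" for every built-in mask
  temperature: number;       // 0.4 or 0.5 depending on the mask
  top_p?: number;            // present only on some masks in these hunks
  max_tokens: number;        // 4000, 4001, or 10000 above
  presence_penalty?: number;
}

const exampleMaskConfig: MaskModelConfig = {
  model: "gpt-4-turbo-preview",
  temperature: 0.4,
  max_tokens: 4000,
  presence_penalty: 0,
};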
@@ -46,15 +46,15 @@ export const DEFAULT_CONFIG = {
   models: DEFAULT_MODELS as any as LLMModel[],
 
   modelConfig: {
-    model: "gpt-4-1106-preview" as ModelType,
+    model: "gpt-4-turbo-preview" as ModelType,
     temperature: 0.5,
     top_p: 1,
     max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 4,
-    compressMessageLengthThreshold: 1000,
+    historyMessageCount: 14,
+    compressMessageLengthThreshold: 43210,
     enableInjectSystemPrompts: true,
     template: DEFAULT_INPUT_TEMPLATE,
   },
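Since DEFAULT_CONFIG.modelConfig seeds every new chat, this hunk changes the model that fresh sessions start with, alongside what appear to be larger history and compression-threshold defaults. A minimal sketch of how such defaults are commonly merged with per-chat overrides; defaultModelConfig and applyOverrides are illustrative names, not code from the repo.

// Defaults mirroring the new values in the hunk above (illustrative subset).
const defaultModelConfig = {
  model: "gpt-4-turbo-preview",
  temperature: 0.5,
  top_p: 1,
  max_tokens: 4000,
  historyMessageCount: 14,
  compressMessageLengthThreshold: 43210,
};

// Hypothetical merge: any per-chat overrides win over the defaults.
function applyOverrides(
  overrides: Partial<typeof defaultModelConfig>,
): typeof defaultModelConfig {
  return { ...defaultModelConfig, ...overrides };
}

console.log(applyOverrides({ temperature: 0.2 }).model); // "gpt-4-turbo-preview"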