Compare commits


21 Commits

Author                     SHA1        Message                                                      Date
LaskyJ                     42e09ad350  Merge ac7b720b5b into 48469bd8ca                             2025-04-15 13:37:29 -05:00
Davidlasky                 ac7b720b5b  send longer chat history                                     2025-04-15 13:37:26 -05:00
Davidlasky                 d02f9b0dd4  added gpt-4.1 and removed gpt-4.5-preview                    2025-04-15 12:52:30 -05:00
Davidlasky                 f2a5af7556  ignore local Dockerfile                                      2025-04-01 17:53:56 -05:00
Davidlasky                 ff196f22c2  local debug                                                  2025-04-01 17:48:31 -05:00
Davidlasky                 106db97f8c  enable o3-mini-high, optimize timeout for thinking models    2025-04-01 17:47:16 -05:00
Davidlasky                 e30d90714b  remove legacy models and support new models                  2025-04-01 17:45:33 -05:00
Davidlasky                 2329d59c83  defaults o3-mini to high                                     2025-04-01 13:33:38 -05:00
Davidlasky                 b5ee4c1fcf  make timeout longer                                          2025-03-29 02:28:00 -05:00
LaskyJ                     6d69494e08  Update utils.ts                                              2025-03-27 12:52:25 -05:00
Davidlasky                 2509495cdc  try to add o1 as a vision model                              2025-03-25 18:01:23 -05:00
LaskyJ                     d65aca6d13  Update constant.ts                                           2025-03-25 15:40:36 -05:00
LaskyJ                     2f5184c5b4  Update constant.ts                                           2025-03-25 15:27:26 -05:00
GH Action - Upstream Sync  20df2eed07  Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web  2025-03-21 01:00:47 +00:00
LaskyJ                     fd998de148  Merge branch 'ChatGPTNextWeb:main' into main                 2025-03-14 18:47:04 -05:00
GH Action - Upstream Sync  fd2e69d1c7  Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web  2025-03-02 01:01:58 +00:00
GH Action - Upstream Sync  e8dcede878  Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web  2025-03-01 01:02:26 +00:00
GH Action - Upstream Sync  3b23f5f8ab  Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web  2025-02-27 00:58:12 +00:00
LaskyJ                     75cdd15bc2  Update constant.ts                                           2025-02-25 21:04:07 -06:00
LaskyJ                     af1dfd2a6c  Update constant.ts                                           2025-02-25 20:52:01 -06:00
LaskyJ                     6aecdd80e9  Update constant.ts                                           2025-02-25 20:50:10 -06:00
11 changed files with 18795 additions and 1942 deletions

.gitignore vendored (1 changed line)

@@ -49,3 +49,4 @@ masks.json
 # mcp config
 app/mcp/mcp_config.json
+Dockerfile.local

app/client/platforms/anthropic.ts

@@ -71,8 +71,6 @@ const ClaudeMapper = {
   system: "user",
 } as const;

-const keys = ["claude-2, claude-instant-1"];
-
 export class ClaudeApi implements LLMApi {
   speech(options: SpeechOptions): Promise<ArrayBuffer> {
     throw new Error("Method not implemented.");

app/client/platforms/google.ts

@@ -197,8 +197,6 @@ export class GeminiProApi implements LLMApi {
       signal: controller.signal,
       headers: getHeaders(),
     };
-    const isThinking = options.config.model.includes("-thinking");

     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),

app/client/platforms/openai.ts

@@ -67,6 +67,8 @@ export interface RequestPayload {
   top_p: number;
   max_tokens?: number;
   max_completion_tokens?: number;
+  reasoning_effort?: string;
+  // O3 only
 }

 export interface DalleRequestPayload {
@@ -196,9 +198,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;

     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 =
-      options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+    const isO1 = options.config.model.startsWith("o1");
+    const isO3 = options.config.model.startsWith("o3");
+    const isO1OrO3 = isO1 || isO3;
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -242,9 +244,18 @@
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }

+      if (isO3) {
+        requestPayload["reasoning_effort"] = "high";
+        // default o3-mini to high reasoning effort
+      }
+
       // add max_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        if (isO1) {
+          requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+        } else {
+          requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        }
       }
     }
@@ -286,6 +297,11 @@
         isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
       );
     }
+
+    // make a fetch request
+    const requestTimeoutId = setTimeout(
+      () => controller.abort(),
+      getTimeoutMSByModel(options.config.model),
+    );

     if (shouldStream) {
       let index = -1;
       const [tools, funcs] = usePluginStore
@@ -393,12 +409,6 @@
       headers: getHeaders(),
     };

-    // make a fetch request
-    const requestTimeoutId = setTimeout(
-      () => controller.abort(),
-      getTimeoutMSByModel(options.config.model),
-    );
-
     const res = await fetch(chatPath, chatPayload);
     clearTimeout(requestTimeoutId);
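Net effect of the changes in this file: o-series models send max_completion_tokens instead of max_tokens, o3 requests default to high reasoning_effort, and the abort timer now starts before the streaming branch rather than only on the non-streaming path. A minimal sketch of the payload branching, assuming only the fields shown in the RequestPayload diff (the function name and signature here are illustrative, not part of the change):

// Illustrative sketch only: mirrors the branching added in this diff.
interface SketchPayload {
  model: string;
  max_tokens?: number;
  max_completion_tokens?: number;
  reasoning_effort?: string; // O3 only
}

function buildPayloadSketch(
  model: string,
  maxTokens: number,
  isVisionModel: boolean,
): SketchPayload {
  const isO1 = model.startsWith("o1");
  const isO3 = model.startsWith("o3");
  const payload: SketchPayload = { model };

  if (isO1 || isO3) {
    // o-series models take max_completion_tokens, not max_tokens
    payload.max_completion_tokens = maxTokens;
  }
  if (isO3) {
    payload.reasoning_effort = "high"; // o3-mini defaults to high effort
  }
  if (isVisionModel) {
    if (isO1) {
      payload.max_completion_tokens = maxTokens;
    } else {
      // give vision requests at least 4000 tokens of headroom
      payload.max_tokens = Math.max(maxTokens, 4000);
    }
  }
  return payload;
}

buildPayloadSketch("o3-mini", 4000, false);
// => { model: "o3-mini", max_completion_tokens: 4000, reasoning_effort: "high" }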

app/constant.ts

@@ -408,35 +408,20 @@ You are an AI assistant with access to system tools. Your role is to help users
 `;

-export const SUMMARIZE_MODEL = "gpt-4o-mini";
-export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
+export const SUMMARIZE_MODEL = "gpt-4.1-mini";
+export const GEMINI_SUMMARIZE_MODEL = "gemini-2.0-flash";
 export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";

 export const KnowledgeCutOffDate: Record<string, string> = {
-  default: "2021-09",
-  "gpt-4-turbo": "2023-12",
-  "gpt-4-turbo-2024-04-09": "2023-12",
-  "gpt-4-turbo-preview": "2023-12",
-  "gpt-4o": "2023-10",
-  "gpt-4o-2024-05-13": "2023-10",
-  "gpt-4o-2024-08-06": "2023-10",
-  "gpt-4o-2024-11-20": "2023-10",
-  "chatgpt-4o-latest": "2023-10",
-  "gpt-4o-mini": "2023-10",
-  "gpt-4o-mini-2024-07-18": "2023-10",
-  "gpt-4-vision-preview": "2023-04",
-  "o1-mini-2024-09-12": "2023-10",
-  "o1-mini": "2023-10",
-  "o1-preview-2024-09-12": "2023-10",
-  "o1-preview": "2023-10",
-  "o1-2024-12-17": "2023-10",
-  o1: "2023-10",
-  "o3-mini-2025-01-31": "2023-10",
-  "o3-mini": "2023-10",
+  default: "2023-10",
+  // With this default it is now easier to rely on "KnowledgeCutOffDate" than to hardcode a date for every model variant, as was done previously.
   "gemini-pro": "2023-12",
   "gemini-pro-vision": "2023-12",
+  "gemini-2.5-pro-exp-03-25": "2025-01",
+  "gemini-2.0-flash": "2024-08",
+  "claude-3-7-sonnet-latest": "2024-10",
+  "claude-3-5-haiku-latest": "2024-10",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-mini": "2024-06",
   "deepseek-chat": "2024-07",
   "deepseek-coder": "2024-07",
 };
@@ -457,11 +442,12 @@ export const DEFAULT_TTS_VOICES = [
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
   /gemini-2\.0/,
+  /gemini-2\.5-pro/,
   /learnlm/,
   /qwen-vl/,
   /qwen2-vl/,
@@ -469,78 +455,23 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
   /vl/i,
+  /o1/,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

-const openaiModels = [
-  // As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
-  // as it is cheaper, more capable, multimodal, and just as fast.
-  // gpt-3.5-turbo is still available for use in the API.
-  "gpt-3.5-turbo",
-  "gpt-3.5-turbo-1106",
-  "gpt-3.5-turbo-0125",
-  "gpt-4",
-  "gpt-4-0613",
-  "gpt-4-32k",
-  "gpt-4-32k-0613",
-  "gpt-4-turbo",
-  "gpt-4-turbo-preview",
-  "gpt-4o",
-  "gpt-4o-2024-05-13",
-  "gpt-4o-2024-08-06",
-  "gpt-4o-2024-11-20",
-  "chatgpt-4o-latest",
-  "gpt-4o-mini",
-  "gpt-4o-mini-2024-07-18",
-  "gpt-4-vision-preview",
-  "gpt-4-turbo-2024-04-09",
-  "gpt-4-1106-preview",
-  "dall-e-3",
-  "o1-mini",
-  "o1-preview",
-  "o3-mini",
-];
+const openaiModels = ["dall-e-3", "o1", "o3-mini", "gpt-4.1", "gpt-4.1-mini"];

-const googleModels = [
-  "gemini-1.0-pro", // Deprecated on 2/15/2025
-  "gemini-1.5-pro-latest",
-  "gemini-1.5-pro",
-  "gemini-1.5-pro-002",
-  "gemini-1.5-pro-exp-0827",
-  "gemini-1.5-flash-latest",
-  "gemini-1.5-flash-8b-latest",
-  "gemini-1.5-flash",
-  "gemini-1.5-flash-8b",
-  "gemini-1.5-flash-002",
-  "gemini-1.5-flash-exp-0827",
-  "learnlm-1.5-pro-experimental",
-  "gemini-exp-1114",
-  "gemini-exp-1121",
-  "gemini-exp-1206",
-  "gemini-2.0-flash",
-  "gemini-2.0-flash-exp",
-  "gemini-2.0-flash-lite-preview-02-05",
-  "gemini-2.0-flash-thinking-exp",
-  "gemini-2.0-flash-thinking-exp-1219",
-  "gemini-2.0-flash-thinking-exp-01-21",
-  "gemini-2.0-pro-exp",
-  "gemini-2.0-pro-exp-02-05",
-  "gemini-2.0-flash-lite",
-  "gemini-2.5-pro-exp-03-25",
-];

-const anthropicModels = [
-  "claude-instant-1.2",
-  "claude-2.0",
-  "claude-2.1",
-  "claude-3-sonnet-20240229",
-  "claude-3-opus-20240229",
-  "claude-3-opus-latest",
-  "claude-3-haiku-20240307",
-  "claude-3-5-haiku-20241022",
-  "claude-3-5-haiku-latest",
-  "claude-3-5-sonnet-20240620",
-  "claude-3-5-sonnet-20241022",
-  "claude-3-5-sonnet-latest",
-  "claude-3-7-sonnet-20250219",
-  "claude-3-7-sonnet-latest",
-];
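The vision regexes act as an include list with a separate exclude list, and the cutoff table now leans on its default entry instead of per-variant dates. A sketch of both lookups using the exported names above; the helper function is illustrative (a similar check lives in app/utils.ts):

// Illustrative: combine the include and exclude lists from app/constant.ts.
import {
  VISION_MODEL_REGEXES,
  EXCLUDE_VISION_MODEL_REGEXES,
  KnowledgeCutOffDate,
} from "./app/constant"; // import path illustrative

function isVisionModelSketch(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((regex) => regex.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((regex) => regex.test(model))
  );
}

isVisionModelSketch("gpt-4.1"); // true, via the new /gpt-4\.1/ entry
isVisionModelSketch("o1"); // true, via the new /o1/ entry
isVisionModelSketch("claude-3-5-haiku-20241022"); // false, explicitly excluded

// Unknown models now fall back to the "2023-10" default cutoff.
const cutoff = KnowledgeCutOffDate["gpt-4.1"] ?? KnowledgeCutOffDate.default;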

app/store/config.ts

@@ -66,14 +66,14 @@ export const DEFAULT_CONFIG = {
   modelConfig: {
     model: "gpt-4o-mini" as ModelType,
     providerName: "OpenAI" as ServiceProvider,
-    temperature: 0.5,
+    temperature: 0.2,
     top_p: 1,
     max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 4,
-    compressMessageLengthThreshold: 1000,
+    historyMessageCount: 20,
+    compressMessageLengthThreshold: 5000,
     compressModel: "",
     compressProviderName: "",
     enableInjectSystemPrompts: true,
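These defaults implement the "send longer chat history" commit: five times as many recent messages per request (4 to 20), summarization deferred until 5000 characters instead of 1000, and a lower default temperature (0.5 to 0.2). A rough sketch of how the two history knobs interact, assuming a simplified chat store (the real selection logic is more involved):

// Rough sketch, not the real chat store: pick the recent window and decide
// whether older history should be compressed into a summary.
function selectHistorySketch(
  messages: string[],
  historyMessageCount = 20,
  compressMessageLengthThreshold = 5000,
) {
  const recent = messages.slice(-historyMessageCount);
  const totalLength = recent.reduce((sum, m) => sum + m.length, 0);
  // beyond the threshold, older turns get summarized with SUMMARIZE_MODEL
  const shouldCompress = totalLength > compressMessageLengthThreshold;
  return { recent, shouldCompress };
}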

app/utils.ts

@@ -304,9 +304,18 @@ export function getTimeoutMSByModel(model: string) {
     model.startsWith("o1") ||
     model.startsWith("o3") ||
     model.includes("deepseek-r") ||
-    model.includes("-thinking")
-  )
+    model.includes("-thinking") ||
+    model.includes("pro")
+  ) {
+    console.log(
+      "thinking model is " +
+        model +
+        " timeout is " +
+        REQUEST_TIMEOUT_MS_FOR_THINKING,
+    );
     return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  }
+  console.log("normal model is " + model + " timeout is " + REQUEST_TIMEOUT_MS);
   return REQUEST_TIMEOUT_MS;
 }
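Both client diffs above route their abort timers through this helper. Note that model.includes("pro") is a broad match: it catches gemini-2.5-pro, but also anything else with "pro" in its id (gemini-pro, for instance), all of which now get the longer budget. A self-contained sketch of the pattern; the two timeout constants are assumptions, the real values live in app/constant.ts:

// Assumed values; the real constants are defined in app/constant.ts.
const REQUEST_TIMEOUT_MS = 60_000;
const REQUEST_TIMEOUT_MS_FOR_THINKING = 5 * 60_000;

function getTimeoutMSByModelSketch(model: string): number {
  const isThinking =
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.includes("deepseek-r") ||
    model.includes("-thinking") ||
    model.includes("pro"); // broad on purpose: any "pro" model waits longer
  return isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS;
}

// The abort pattern used by the clients: start the timer before the fetch,
// clear it whether the request succeeds or fails.
async function fetchWithModelTimeout(
  url: string,
  init: RequestInit,
  model: string,
) {
  const controller = new AbortController();
  const timeoutId = setTimeout(
    () => controller.abort(),
    getTimeoutMSByModelSketch(model),
  );
  try {
    return await fetch(url, { ...init, signal: controller.signal });
  } finally {
    clearTimeout(timeoutId);
  }
}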

package-lock.json generated, new file (17486 changed lines)

File diff suppressed because it is too large

test/model-available.test.ts

@@ -3,7 +3,7 @@ import { isModelNotavailableInServer } from "../app/utils/model";
 describe("isModelNotavailableInServer", () => {
   test("test model will return false, which means the model is available", () => {
     const customModels = "";
-    const modelName = "gpt-4";
+    const modelName = "gpt-4.1";
     const providerNames = "OpenAI";
     const result = isModelNotavailableInServer(
       customModels,
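With gpt-4 gone from openaiModels, the availability test probes gpt-4.1 instead. For reference, the helper's semantics as the test states them (arguments taken from the test body; the expected value is a sketch, not output from a real run):

import { isModelNotavailableInServer } from "../app/utils/model";

// "false" means the model IS available on the server (per the test name above).
const result = isModelNotavailableInServer("", "gpt-4.1", "OpenAI");
// expect(result).toBe(false) with no custom-model restrictions configured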

test/vision-model-checker.test.ts

@@ -15,10 +15,11 @@ describe("isVisionModel", () => {
   test("should identify vision models using regex patterns", () => {
     const visionModels = [
       "gpt-4-vision",
+      "gpt-4.1",
       "claude-3-opus",
       "gemini-1.5-pro",
       "gemini-2.0",
+      "gemini-2.5-pro",
       "gemini-exp-vision",
       "learnlm-vision",
       "qwen-vl-max",

yarn.lock (3093 changed lines)

File diff suppressed because it is too large