mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-10-19 00:13:41 +08:00

Compare commits: JI4JUN-fea...42e09ad350
21 Commits
Commits (SHA1):
42e09ad350
ac7b720b5b
d02f9b0dd4
f2a5af7556
ff196f22c2
106db97f8c
e30d90714b
2329d59c83
b5ee4c1fcf
6d69494e08
2509495cdc
d65aca6d13
2f5184c5b4
20df2eed07
fd998de148
fd2e69d1c7
e8dcede878
3b23f5f8ab
75cdd15bc2
af1dfd2a6c
6aecdd80e9
.gitignore (vendored, 1 change)

@@ -49,3 +49,4 @@ masks.json
 # mcp config
 app/mcp/mcp_config.json
+Dockerfile.local
@@ -71,8 +71,6 @@ const ClaudeMapper = {
   system: "user",
 } as const;
 
-const keys = ["claude-2, claude-instant-1"];
-
 export class ClaudeApi implements LLMApi {
   speech(options: SpeechOptions): Promise<ArrayBuffer> {
     throw new Error("Method not implemented.");
@@ -197,8 +197,6 @@ export class GeminiProApi implements LLMApi {
       signal: controller.signal,
       headers: getHeaders(),
     };
 
-    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
@@ -67,6 +67,8 @@ export interface RequestPayload {
   top_p: number;
   max_tokens?: number;
   max_completion_tokens?: number;
+  reasoning_effort?: string;
+  // O3 only
 }
 
 export interface DalleRequestPayload {
@@ -196,9 +198,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 =
-      options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+    const isO1 = options.config.model.startsWith("o1");
+    const isO3 = options.config.model.startsWith("o3");
+    const isO1OrO3 = isO1 || isO3;
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -242,9 +244,18 @@ export class ChatGPTApi implements LLMApi {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }
 
+      if (isO3) {
+        requestPayload["reasoning_effort"] = "high";
+        // make o3-mini defaults to high reasoning effort
+      }
+
       // add max_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        if (isO1) {
+          requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+        } else {
+          requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        }
       }
     }
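Taken together, the three ChatGPTApi hunks above split the old combined o1/o3 test into separate isO1 and isO3 flags and use them to shape the payload: reasoning models take max_completion_tokens instead of max_tokens, and o3 models additionally default to reasoning_effort: "high". A minimal standalone sketch of that gating logic; shapePayload and the trimmed ModelConfig type are illustrative names, not identifiers from the diff:

// Illustrative sketch of the payload rules above; shapePayload and
// this minimal ModelConfig are hypothetical, not code from the repo.
interface ModelConfig {
  model: string;
  max_tokens: number;
}

interface ReasoningPayload {
  model: string;
  max_tokens?: number;
  max_completion_tokens?: number;
  reasoning_effort?: string; // O3 only
}

function shapePayload(cfg: ModelConfig, visionModel: boolean): ReasoningPayload {
  const isO1 = cfg.model.startsWith("o1");
  const isO3 = cfg.model.startsWith("o3");
  const payload: ReasoningPayload = { model: cfg.model };

  if (isO1 || isO3) {
    // Reasoning models use max_completion_tokens rather than max_tokens.
    payload.max_completion_tokens = cfg.max_tokens;
  }
  if (isO3) {
    // o3-mini defaults to high reasoning effort, per the hunk above.
    payload.reasoning_effort = "high";
  }
  if (visionModel) {
    if (isO1) {
      payload.max_completion_tokens = cfg.max_tokens;
    } else {
      // Vision requests are guaranteed at least 4000 output tokens.
      payload.max_tokens = Math.max(cfg.max_tokens, 4000);
    }
  }
  return payload;
}

// shapePayload({ model: "o3-mini", max_tokens: 4000 }, false)
//   → { model: "o3-mini", max_completion_tokens: 4000, reasoning_effort: "high" }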
@@ -286,6 +297,11 @@ export class ChatGPTApi implements LLMApi {
         isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
       );
     }
+    // make a fetch request
+    const requestTimeoutId = setTimeout(
+      () => controller.abort(),
+      getTimeoutMSByModel(options.config.model),
+    );
     if (shouldStream) {
       let index = -1;
       const [tools, funcs] = usePluginStore
@@ -393,12 +409,6 @@ export class ChatGPTApi implements LLMApi {
       headers: getHeaders(),
     };
 
-    // make a fetch request
-    const requestTimeoutId = setTimeout(
-      () => controller.abort(),
-      getTimeoutMSByModel(options.config.model),
-    );
-
     const res = await fetch(chatPath, chatPayload);
     clearTimeout(requestTimeoutId);
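These two hunks move the abort timer ahead of the shouldStream branch, so the streaming and non-streaming paths share the same model-dependent timeout instead of only the non-streaming path being guarded. The underlying pattern is the standard AbortController idiom; a minimal sketch, with a placeholder URL and timeout value rather than the repo's constants:

// Minimal AbortController sketch; url and timeoutMs are placeholders.
async function fetchWithTimeout(url: string, timeoutMs: number): Promise<Response> {
  const controller = new AbortController();
  // Arm the timer before issuing the request, as the hunk above now does.
  const requestTimeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { signal: controller.signal });
  } finally {
    // Clear the timer whether the request completed or was aborted.
    clearTimeout(requestTimeoutId);
  }
}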
@@ -408,35 +408,20 @@ You are an AI assistant with access to system tools. Your role is to help users
 `;
 
-export const SUMMARIZE_MODEL = "gpt-4o-mini";
-export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
+export const SUMMARIZE_MODEL = "gpt-4.1-mini";
+export const GEMINI_SUMMARIZE_MODEL = "gemini-2.0-flash";
 export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";
 
 export const KnowledgeCutOffDate: Record<string, string> = {
-  default: "2021-09",
-  "gpt-4-turbo": "2023-12",
-  "gpt-4-turbo-2024-04-09": "2023-12",
-  "gpt-4-turbo-preview": "2023-12",
-  "gpt-4o": "2023-10",
-  "gpt-4o-2024-05-13": "2023-10",
-  "gpt-4o-2024-08-06": "2023-10",
-  "gpt-4o-2024-11-20": "2023-10",
-  "chatgpt-4o-latest": "2023-10",
-  "gpt-4o-mini": "2023-10",
-  "gpt-4o-mini-2024-07-18": "2023-10",
-  "gpt-4-vision-preview": "2023-04",
-  "o1-mini-2024-09-12": "2023-10",
-  "o1-mini": "2023-10",
-  "o1-preview-2024-09-12": "2023-10",
-  "o1-preview": "2023-10",
-  "o1-2024-12-17": "2023-10",
-  o1: "2023-10",
-  "o3-mini-2025-01-31": "2023-10",
-  "o3-mini": "2023-10",
+  default: "2023-10",
+  // After improvements,
+  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
   "gemini-pro": "2023-12",
   "gemini-pro-vision": "2023-12",
+  "gemini-2.5-pro-exp-03-25": "2025-01",
   "gemini-2.0-flash": "2024-08",
   "claude-3-7-sonnet-latest": "2024-10",
   "claude-3-5-haiku-latest": "2024-10",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-mini": "2024-06",
   "deepseek-chat": "2024-07",
   "deepseek-coder": "2024-07",
 };
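With the table cut down to a handful of entries plus a default of "2023-10", any model without an explicit entry presumably falls back to the default. A sketch of that lookup under that assumption; cutoffFor is a hypothetical helper, not a function from the repo:

// Hypothetical lookup helper: exact key first, then the default entry.
const KnowledgeCutOffDate: Record<string, string> = {
  default: "2023-10",
  "gpt-4.1-mini": "2024-06",
  "gemini-2.0-flash": "2024-08",
};

function cutoffFor(model: string): string {
  return KnowledgeCutOffDate[model] ?? KnowledgeCutOffDate.default;
}

// cutoffFor("gpt-4.1-mini") → "2024-06"; cutoffFor("unlisted-model") → "2023-10"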
@@ -457,11 +442,12 @@ export const DEFAULT_TTS_VOICES = [
 
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
   /gemini-2\.0/,
+  /gemini-2\.5-pro/,
   /learnlm/,
   /qwen-vl/,
   /qwen2-vl/,
@@ -469,78 +455,23 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
+  /o1/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
 
-const openaiModels = [
-  // As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
-  // as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
-  "gpt-3.5-turbo",
-  "gpt-3.5-turbo-1106",
-  "gpt-3.5-turbo-0125",
-  "gpt-4",
-  "gpt-4-0613",
-  "gpt-4-32k",
-  "gpt-4-32k-0613",
-  "gpt-4-turbo",
-  "gpt-4-turbo-preview",
-  "gpt-4o",
-  "gpt-4o-2024-05-13",
-  "gpt-4o-2024-08-06",
-  "gpt-4o-2024-11-20",
-  "chatgpt-4o-latest",
-  "gpt-4o-mini",
-  "gpt-4o-mini-2024-07-18",
-  "gpt-4-vision-preview",
-  "gpt-4-turbo-2024-04-09",
-  "gpt-4-1106-preview",
-  "dall-e-3",
-  "o1-mini",
-  "o1-preview",
-  "o3-mini",
-];
+const openaiModels = ["dall-e-3", "o1", "o3-mini", "gpt-4.1", "gpt-4.1-mini"];
 
 const googleModels = [
   "gemini-1.0-pro", // Deprecated on 2/15/2025
   "gemini-1.5-pro-latest",
   "gemini-1.5-pro",
   "gemini-1.5-pro-002",
   "gemini-1.5-pro-exp-0827",
   "gemini-1.5-flash-latest",
   "gemini-1.5-flash-8b-latest",
   "gemini-1.5-flash",
   "gemini-1.5-flash-8b",
   "gemini-1.5-flash-002",
   "gemini-1.5-flash-exp-0827",
   "learnlm-1.5-pro-experimental",
   "gemini-exp-1114",
   "gemini-exp-1121",
   "gemini-exp-1206",
   "gemini-2.0-flash",
   "gemini-2.0-flash-exp",
   "gemini-2.0-flash-lite-preview-02-05",
   "gemini-2.0-flash-thinking-exp",
   "gemini-2.0-flash-thinking-exp-1219",
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
   "gemini-2.0-flash-lite",
+  "gemini-2.5-pro-exp-03-25",
 ];
 
 const anthropicModels = [
   "claude-instant-1.2",
   "claude-2.0",
   "claude-2.1",
   "claude-3-sonnet-20240229",
   "claude-3-opus-20240229",
   "claude-3-opus-latest",
   "claude-3-haiku-20240307",
   "claude-3-5-haiku-20241022",
   "claude-3-5-haiku-latest",
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
   "claude-3-7-sonnet-20250219",
   "claude-3-7-sonnet-latest",
 ];
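The regex list above, combined with EXCLUDE_VISION_MODEL_REGEXES, implies a two-step test: a model counts as a vision model when some inclusion pattern matches and no exclusion pattern does. A sketch of that check; the name mirrors the isVisionModel exercised by the tests below, but this body is an assumption, not the repo's implementation:

// Assumed shape of the check: an include-list hit that is not also
// an exclude-list hit. Regex subset taken from the lists above.
const VISION_MODEL_REGEXES = [/vision/, /gpt-4\.1/, /claude-3/, /gemini-2\.5-pro/, /vl/i, /o1/];
const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

function isVisionModel(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}

// isVisionModel("gemini-2.5-pro") → true
// isVisionModel("claude-3-5-haiku-20241022") → false (matched, then excluded)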
@@ -66,14 +66,14 @@ export const DEFAULT_CONFIG = {
   modelConfig: {
     model: "gpt-4o-mini" as ModelType,
     providerName: "OpenAI" as ServiceProvider,
-    temperature: 0.5,
+    temperature: 0.2,
     top_p: 1,
     max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 4,
-    compressMessageLengthThreshold: 1000,
+    historyMessageCount: 20,
+    compressMessageLengthThreshold: 5000,
     compressModel: "",
     compressProviderName: "",
     enableInjectSystemPrompts: true,
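These defaults lower the sampling temperature from 0.5 to 0.2 and make history handling less aggressive: twenty recent messages are kept instead of four, and compression of older history does not kick in until five times more accumulated text. A sketch of how a length threshold like this is typically consulted; shouldCompressHistory is hypothetical, and the real trigger lives in the chat store, not in this file:

// Hypothetical trigger: compress once accumulated history text
// exceeds compressMessageLengthThreshold characters.
function shouldCompressHistory(
  messages: { content: string }[],
  threshold: number = 5000,
): boolean {
  const totalLength = messages.reduce((n, m) => n + m.content.length, 0);
  return totalLength > threshold;
}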
app/utils.ts (13 changes)

@@ -304,9 +304,18 @@ export function getTimeoutMSByModel(model: string) {
     model.startsWith("o1") ||
     model.startsWith("o3") ||
     model.includes("deepseek-r") ||
-    model.includes("-thinking")
-  )
+    model.includes("-thinking") ||
+    model.includes("pro")
+  ) {
+    console.log(
+      "thinking model is " +
+        model +
+        " timeout is " +
+        REQUEST_TIMEOUT_MS_FOR_THINKING,
+    );
     return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  }
+  console.log("normal model is " + model + " timeout is " + REQUEST_TIMEOUT_MS);
   return REQUEST_TIMEOUT_MS;
 }
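Note that model.includes("pro") is a broad substring test: it matches any name containing "pro", such as the older gemini-pro, not only gemini-2.5-pro, so more models now receive the long timeout. For reference, the post-change function reads roughly as below; the hunk starts mid-condition, so any earlier checks are omitted, and the two timeout constants come from app/constant.ts with values not shown in this diff (the numbers here are placeholders):

// Placeholder values; the real constants live in app/constant.ts and
// are not visible in this diff.
const REQUEST_TIMEOUT_MS = 60_000;
const REQUEST_TIMEOUT_MS_FOR_THINKING = 5 * 60_000;

// Only the conditions visible in the hunk are reproduced; the hunk
// begins mid-condition, so earlier checks (if any) are omitted here.
export function getTimeoutMSByModel(model: string) {
  if (
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.includes("deepseek-r") ||
    model.includes("-thinking") ||
    model.includes("pro")
  ) {
    console.log(
      "thinking model is " + model + " timeout is " + REQUEST_TIMEOUT_MS_FOR_THINKING,
    );
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  }
  console.log("normal model is " + model + " timeout is " + REQUEST_TIMEOUT_MS);
  return REQUEST_TIMEOUT_MS;
}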
package-lock.json (generated, new file, 17486 lines)

File diff suppressed because it is too large.
@@ -3,7 +3,7 @@ import { isModelNotavailableInServer } from "../app/utils/model";
 
 describe("isModelNotavailableInServer", () => {
   test("test model will return false, which means the model is available", () => {
     const customModels = "";
-    const modelName = "gpt-4";
+    const modelName = "gpt-4.1";
     const providerNames = "OpenAI";
     const result = isModelNotavailableInServer(
       customModels,
@@ -15,10 +15,11 @@ describe("isVisionModel", () => {
 
   test("should identify vision models using regex patterns", () => {
     const visionModels = [
       "gpt-4-vision",
+      "gpt-4.1",
       "claude-3-opus",
       "gemini-1.5-pro",
       "gemini-2.0",
+      "gemini-2.5-pro",
       "gemini-exp-vision",
       "learnlm-vision",
       "qwen-vl-max",