Compare commits

...

23 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Shay Molcho | 43d25312f0 | Merge b05b2e78cd into 11b37c15bd | 2025-04-17 13:03:18 +08:00 |
| RiverRay | 11b37c15bd | Merge pull request #6450 from stephen-zeng/main: Add gpt-4.1 family & gpt-4.5-preview support | 2025-04-17 08:29:19 +08:00 |
| QwQwQ | 1d0038f17d | add gpt-4.5-preview support | 2025-04-16 22:10:47 +08:00 |
| QwQwQ | 619fa519c0 | add gpt-4.1 family support | 2025-04-16 22:02:35 +08:00 |
| RiverRay | 48469bd8ca | Merge pull request #6392 from ChatGPTNextWeb/Leizhenpeng-patch-6: Update README.md | 2025-03-20 17:52:02 +08:00 |
| RiverRay | 5a5e887f2b | Update README.md | 2025-03-20 17:51:47 +08:00 |
| RiverRay | b6f5d75656 | Merge pull request #6344 from vangie/fix/jest-setup-esm: test: fix unit test failures | 2025-03-14 20:04:56 +08:00 |
| Vangie Du | 0d41a17ef6 | test: fix unit test failures | 2025-03-07 14:49:17 +08:00 |
| RiverRay | f7cde17919 | Merge pull request #6292 from Little-LittleProgrammer/feature/alibaba-omni-support: feat(alibaba): Added alibaba vision model and omni model support | 2025-03-01 10:25:16 +08:00 |
| RiverRay | 570cbb34b6 | Merge pull request #6310 from agi-dude/patch-1: Remove duplicate links | 2025-03-01 10:24:38 +08:00 |
| RiverRay | 7aa9ae0a3e | Merge pull request #6311 from ChatGPTNextWeb/6305-bugthe-first-message-except-the-system-message-of-deepseek-reasoner-must-be-a-user-message-but-an-assistant-message-detected: fix: enforce that the first message (excluding system messages) is a … | 2025-02-28 19:48:09 +08:00 |
| Kadxy | 2d4180f5be | fix: update request payload to use filtered messages in Deepseek API | 2025-02-28 13:59:30 +08:00 |
| Kadxy | 9f0182b55e | fix: enforce that the first message (excluding system messages) is a user message in the Deepseek API | 2025-02-28 13:54:58 +08:00 |
| Mr. AGI | ad6666eeaf | Update README.md | 2025-02-28 10:47:52 +05:00 |
| EvanWu | a2c4e468a0 | fix(app/utils/chat.ts): fix type error | 2025-02-26 19:58:32 +08:00 |
| RiverRay | 2167076652 | Merge pull request #6293 from hyiip/main: claude 3.7 support | 2025-02-26 18:41:28 +08:00 |
| RiverRay | e123076250 | Merge pull request #6295 from rexkyng/patch-1: Fix: Improve Mistral icon detection and remove redundant code. | 2025-02-26 18:39:59 +08:00 |
| Rex Ng | ebcb4db245 | Fix: Improve Mistral icon detection and remove redundant code. Added "codestral" to the list of acceptable names for the Mistral icon, ensuring proper detection; removed duplicate `toLowerCase()` calls. | 2025-02-25 14:30:18 +08:00 |
| EvanWu | 0a25a1a8cb | refactor(app/utils/chat.ts): optimize function preProcessImageContentBase | 2025-02-25 09:22:47 +08:00 |
| hyiip | f3154b20a5 | claude 3.7 support | 2025-02-25 03:55:24 +08:00 |
| EvanWu | b709ee3983 | feat(alibaba): Added alibaba vision model and omni model support | 2025-02-24 20:18:07 +08:00 |
| RiverRay | f5f3ce94f6 | Update README.md | 2025-02-21 08:56:43 +08:00 |
| Shay Molcho | b05b2e78cd | Added missing periods (.) in multiple places, to ensure consistency and improve readability in the code and documentation | 2025-01-22 20:23:51 +02:00 |
11 changed files with 118 additions and 53 deletions

View File: README.md

@@ -7,7 +7,7 @@
-<h1 align="center">NextChat (ChatGPT Next Web)</h1>
+<h1 align="center">NextChat</h1>
English / [简体中文](./README_CN.md)
@@ -22,7 +22,6 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
@@ -41,24 +40,6 @@ English / [简体中文](./README_CN.md)
</div>
-## 👋 Hey, NextChat is going to develop a native app!
-> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!
-✨ Several key points:
-- Starting from 0, you are a veteran
-- Completely open source, not hidden
-- Native development, pursuing the ultimate experience
-Will you come and do something together? 😎
-https://github.com/ChatGPTNextWeb/NextChat/issues/6269
-#Seeking for talents is thirsty #lack of people
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
@@ -130,7 +111,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
-- 🚀 v2.14.0 Now supports Artifacts & SD
+- 🚀 v2.14.0 Now supports Artifacts & SD.
- 🚀 v2.10.1 support Google Gemini Pro model.
- 🚀 v2.9.11 you can use azure endpoint now.
- 🚀 v2.8 now we have a client that runs across all platforms!
@@ -338,7 +319,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
### `DEFAULT_MODEL` optional
-Change default model
+Change default model.
### `VISION_MODELS` (optional)
@@ -369,7 +350,7 @@ Customize Stability API url.
### `ENABLE_MCP` (optional)
-Enable MCP（Model Context Protocol）Feature
+Enable MCP（Model Context Protocol）Feature.
### `SILICONFLOW_API_KEY` (optional)

View File: app/client/api.ts

@@ -40,6 +40,11 @@ export interface MultimodalContent {
};
}
+export interface MultimodalContentForAlibaba {
+  text?: string;
+  image?: string;
+}
export interface RequestMessage {
role: MessageRole;
content: string | MultimodalContent[];
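
The new `MultimodalContentForAlibaba` interface mirrors DashScope's message parts, where each entry carries either a `text` or an `image` field. A minimal sketch of a message body built with it (the values are illustrative, not from the PR):

```ts
import { MultimodalContentForAlibaba } from "@/app/client/api";

// Illustrative DashScope-style multimodal content: one image part plus one
// text part; real image values are Base64 data URLs or fetchable URLs.
const content: MultimodalContentForAlibaba[] = [
  { image: "https://example.com/cat.png" },
  { text: "Describe this image." },
];
```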

View File: app/client/platforms/alibaba.ts

@@ -7,7 +7,10 @@ import {
ChatMessageTool,
usePluginStore,
} from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
@@ -15,12 +18,14 @@ import {
LLMModel,
SpeechOptions,
MultimodalContent,
+  MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
+  isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
}
async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
},
};
+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+            ? getMessageTextContentWithoutThinking(v)
+            : getMessageTextContent(v)
+      ) as any;
+      messages.push({ role: v.role, content });
+    }
const shouldStream = !!options.config.stream;
const requestPayload: RequestPayload = {
model: modelConfig.model,
@@ -129,7 +141,7 @@ export class QwenApi implements LLMApi {
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
};
-    const chatPath = this.path(Alibaba.ChatPath);
+    const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@ export class QwenApi implements LLMApi {
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: {
-            content: string | null;
+            content: string | null | MultimodalContentForAlibaba[];
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
@@ -212,7 +224,9 @@ export class QwenApi implements LLMApi {
} else if (content && content.length > 0) {
return {
isThinking: false,
-              content: content,
+              content: Array.isArray(content)
+                ? content.map((item) => item.text).join(",")
+                : content,
};
}
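
For context, the change above means a DashScope choice's `message.content` may now be an array of multimodal parts rather than a plain string; the handler flattens it by joining the parts' text fields. A standalone sketch of that rule:

```ts
// Standalone version of the flattening applied in the stream handler:
// array content keeps only the text fields, joined with commas.
function flattenAlibabaContent(
  content: string | null | { text?: string }[],
): string | null {
  return Array.isArray(content)
    ? content.map((item) => item.text).join(",")
    : content;
}

// flattenAlibabaContent([{ text: "A cat" }, { text: "sits on a mat" }])
//   === "A cat,sits on a mat"
```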

View File: app/client/platforms/deepseek.ts

@@ -75,6 +75,25 @@ export class DeepSeekApi implements LLMApi {
}
}
+    // Detect and fix the message order: ensure that, apart from system
+    // messages, the first message is a user message
+    const filteredMessages: ChatOptions["messages"] = [];
+    let hasFoundFirstUser = false;
+
+    for (const msg of messages) {
+      if (msg.role === "system") {
+        // Keep all system messages
+        filteredMessages.push(msg);
+      } else if (msg.role === "user") {
+        // User messages are added directly
+        filteredMessages.push(msg);
+        hasFoundFirstUser = true;
+      } else if (hasFoundFirstUser) {
+        // After finding the first user message, all subsequent non-system messages are retained
+        filteredMessages.push(msg);
+      }
+      // If hasFoundFirstUser is false and it is not a system message, it is skipped
+    }
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
@@ -85,7 +104,7 @@ export class DeepSeekApi implements LLMApi {
};
const requestPayload: RequestPayload = {
-      messages,
+      messages: filteredMessages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
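
The guard above can be read as a small standalone transform; this sketch (names shortened for illustration) shows the behavior the deepseek-reasoner model requires: the first non-system message must come from the user.

```ts
type Msg = { role: "system" | "user" | "assistant"; content: string };

// Keep system messages; drop assistant messages that appear before the
// first user message; keep everything after the first user message.
function enforceUserFirst(messages: Msg[]): Msg[] {
  const out: Msg[] = [];
  let seenUser = false;
  for (const msg of messages) {
    if (msg.role === "system") {
      out.push(msg);
    } else if (msg.role === "user") {
      out.push(msg);
      seenUser = true;
    } else if (seenUser) {
      out.push(msg);
    }
  }
  return out;
}

// A leading assistant greeting is dropped, satisfying the API constraint:
// enforceUserFirst([
//   { role: "system", content: "You are helpful." },
//   { role: "assistant", content: "Hi!" },
//   { role: "user", content: "Hello" },
// ]) keeps only the system and user messages.
```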

View File: app/components/emoji.tsx

@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
LlmIcon = BotIconGemma;
} else if (modelName.startsWith("claude")) {
LlmIcon = BotIconClaude;
-  } else if (modelName.toLowerCase().includes("llama")) {
+  } else if (modelName.includes("llama")) {
LlmIcon = BotIconMeta;
-  } else if (modelName.startsWith("mixtral")) {
+  } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
LlmIcon = BotIconMistral;
-  } else if (modelName.toLowerCase().includes("deepseek")) {
+  } else if (modelName.includes("deepseek")) {
LlmIcon = BotIconDeepseek;
} else if (modelName.startsWith("moonshot")) {
LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
} else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
LlmIcon = BotIconDoubao;
} else if (
-    modelName.toLowerCase().includes("glm") ||
+    modelName.includes("glm") ||
modelName.startsWith("cogview-") ||
modelName.startsWith("cogvideox-")
) {
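
The removed `toLowerCase()` calls were redundant because `modelName` appears to be lowercased once upstream in the component; the new codestral branch can be exercised like this (a sketch, not code from the PR):

```ts
// Mirror of the patched Mistral-icon condition; assumes the caller has
// already lowercased the model name, as the Avatar component does.
const usesMistralIcon = (modelName: string): boolean =>
  modelName.startsWith("mixtral") || modelName.startsWith("codestral");

usesMistralIcon("codestral-latest"); // true (new in this patch)
usesMistralIcon("mixtral-8x7b"); // true (unchanged)
```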

View File: app/constant.ts

@@ -221,7 +221,12 @@ export const ByteDance = {
export const Alibaba = {
ExampleEndpoint: ALIBABA_BASE_URL,
ChatPath: "v1/services/aigc/text-generation/generation",
ChatPath: (modelName: string) => {
if (modelName.includes("vl") || modelName.includes("omni")) {
return "v1/services/aigc/multimodal-generation/generation";
}
return `v1/services/aigc/text-generation/generation`;
},
};
export const Tencent = {
@@ -412,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4-turbo": "2023-12",
"gpt-4-turbo-2024-04-09": "2023-12",
"gpt-4-turbo-preview": "2023-12",
"gpt-4.1": "2024-06",
"gpt-4.1-2025-04-14": "2024-06",
"gpt-4.1-mini": "2024-06",
"gpt-4.1-mini-2025-04-14": "2024-06",
"gpt-4.1-nano": "2024-06",
"gpt-4.1-nano-2025-04-14": "2024-06",
"gpt-4.5-preview": "2023-10",
"gpt-4.5-preview-2025-02-27": "2023-10",
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
@@ -453,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [
export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
+  /gpt-4\.1/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
@@ -480,6 +494,14 @@ const openaiModels = [
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
@@ -535,6 +557,8 @@ const anthropicModels = [
"claude-3-5-sonnet-20240620",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-latest",
];
const baiduModels = [
@@ -568,6 +592,9 @@ const alibabaModes = [
"qwen-max-0403",
"qwen-max-0107",
"qwen-max-longcontext",
"qwen-omni-turbo",
"qwen-vl-plus",
"qwen-vl-max",
];
const tencentModels = [
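
The `ChatPath` constant becoming a function is the key routing change in this file: vision (`vl`) and omni models go to DashScope's multimodal endpoint, everything else keeps the text endpoint. A usage sketch against the new API:

```ts
import { Alibaba } from "@/app/constant";

// Vision and omni models route to the multimodal endpoint.
Alibaba.ChatPath("qwen-vl-max"); // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-omni-turbo"); // "v1/services/aigc/multimodal-generation/generation"

// Plain text models keep the original endpoint.
Alibaba.ChatPath("qwen-max"); // "v1/services/aigc/text-generation/generation"
```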

View File: app/utils/chat.ts

@@ -3,7 +3,7 @@ import {
UPLOAD_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
import Locale from "@/app/locales";
import {
EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
});
}
-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
  content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
) {
if (typeof content === "string") {
return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
if (part?.type == "image_url" && part?.image_url?.url) {
try {
const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
} catch (error) {
console.error("Error processing image URL:", error);
}
@@ -92,6 +93,23 @@ export async function preProcessImageContent(
return result;
}
+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}
const imageCaches: Record<string, string> = {};
export function cacheImageToBase64Image(imageUrl: string) {
if (imageUrl.includes(CACHE_URL_PREFIX)) {
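
The refactor separates image caching and Base64 conversion from the provider-specific part shape: `preProcessImageContentBase` walks the content and caches images, while each provider supplies a `transformImageUrl` producing its wire format. A hypothetical third-party shape would plug in the same way (illustrative only, not code from the PR):

```ts
import { preProcessImageContentBase } from "@/app/utils/chat";
import { RequestMessage } from "@/app/client/api";

// Hypothetical provider expecting { type: "image", data: ... } parts;
// only the transform changes, the traversal and caching are shared.
export async function preProcessImageContentForExampleProvider(
  content: RequestMessage["content"],
) {
  return preProcessImageContentBase(content, async (url) => ({
    type: "image",
    data: url,
  }));
}
```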

View File: jest.config.ts

@@ -15,6 +15,8 @@ const config: Config = {
moduleNameMapper: {
"^@/(.*)$": "<rootDir>/$1",
},
+  extensionsToTreatAsEsm: [".ts", ".tsx"],
+  injectGlobals: true,
};
// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async

View File: jest.setup.ts

@@ -1,24 +1,22 @@
// Learn more: https://github.com/testing-library/jest-dom
import "@testing-library/jest-dom";
+import { jest } from "@jest/globals";
global.fetch = jest.fn(() =>
Promise.resolve({
ok: true,
status: 200,
-    json: () => Promise.resolve({}),
+    json: () => Promise.resolve([]),
headers: new Headers(),
redirected: false,
statusText: "OK",
type: "basic",
url: "",
clone: function () {
return this;
},
body: null,
bodyUsed: false,
arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
blob: () => Promise.resolve(new Blob()),
formData: () => Promise.resolve(new FormData()),
text: () => Promise.resolve(""),
-  }),
+  } as Response),
);
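
With this setup file loaded, every test gets a global `fetch` stub that resolves without touching the network; note that `json()` now resolves to an empty array. A minimal test against the stub (a sketch assuming this setup file and `injectGlobals: true` are active):

```ts
// Any test can call fetch directly; the global mock from jest.setup.ts
// answers with ok: true, status 200, and an empty JSON array.
test("global fetch is stubbed", async () => {
  const res = await fetch("https://example.com/api");
  expect(res.ok).toBe(true);
  expect(res.status).toBe(200);
  expect(await res.json()).toEqual([]);
});
```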

View File: package.json

@@ -17,8 +17,8 @@
"prompts": "node ./scripts/fetch-prompts.mjs",
"prepare": "husky install",
"proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
"test": "jest --watch",
"test:ci": "jest --ci"
"test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
"test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
},
"dependencies": {
"@fortaine/fetch-event-source": "^3.0.6",

View File: test/vision-model-checker.test.ts

@@ -1,3 +1,4 @@
+import { jest } from "@jest/globals";
import { isVisionModel } from "../app/utils";
describe("isVisionModel", () => {
@@ -50,7 +51,7 @@ describe("isVisionModel", () => {
test("should identify models from VISION_MODELS env var", () => {
process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
expect(isVisionModel("custom-vision-model")).toBe(true);
expect(isVisionModel("another-vision-model")).toBe(true);
expect(isVisionModel("unrelated-model")).toBe(false);
@@ -64,4 +65,4 @@ describe("isVisionModel", () => {
expect(isVisionModel("unrelated-model")).toBe(false);
expect(isVisionModel("gpt-4-vision")).toBe(true);
});
-});
+});