Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-25 19:33:42 +08:00)

Compare commits: 6305-bugth ... e157587908 (24 commits)
| SHA1 |
|---|
| e157587908 |
| 705dffc664 |
| 02f7e6de98 |
| 3809375694 |
| 1b0de25986 |
| 865c45dd29 |
| 1f5d8e6d9c |
| c9ef6d58ed |
| 2d7229d2b8 |
| 11b37c15bd |
| 1d0038f17d |
| 619fa519c0 |
| 48469bd8ca |
| 5a5e887f2b |
| b6f5d75656 |
| 0d41a17ef6 |
| f7cde17919 |
| 570cbb34b6 |
| 7aa9ae0a3e |
| ad6666eeaf |
| a2c4e468a0 |
| 0a25a1a8cb |
| b709ee3983 |
| 3939ff47ef |

README.md (30 lines changed)
@@ -7,7 +7,7 @@



<h1 align="center">NextChat (ChatGPT Next Web)</h1>
<h1 align="center">NextChat</h1>

English / [简体中文](./README_CN.md)

@@ -22,8 +22,7 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]

[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)


[saas-url]: https://nextchat.club?utm_source=readme
@@ -41,29 +40,12 @@ English / [简体中文](./README_CN.md)

</div>

## 👋 Hey, NextChat is going to develop a native app!
## 🥳 Cheer for NextChat iOS Version Online!
> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)

> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!


✨ Several key points:

- Starting from 0, you are a veteran
- Completely open source, not hidden
- Native development, pursuing the ultimate experience

Will you come and do something together? 😎

https://github.com/ChatGPTNextWeb/NextChat/issues/6269

#Seeking for talents is thirsty #lack of people


## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model

<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)




## 🫣 NextChat Support MCP !

@@ -40,6 +40,11 @@ export interface MultimodalContent {
  };
}

export interface MultimodalContentForAlibaba {
  text?: string;
  image?: string;
}

export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];

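For context, DashScope's multimodal endpoint expects message content as an array of single-field parts rather than OpenAI-style `image_url` objects, which is what the new interface models. A minimal sketch (the sample values are illustrative):

```ts
import { MultimodalContentForAlibaba } from "@/app/client/api";

// Each DashScope part carries either `text` or `image`.
const parts: MultimodalContentForAlibaba[] = [
  { text: "Describe this picture." },
  { image: "data:image/png;base64,..." },
];
```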
@@ -7,7 +7,10 @@ import {
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
  preProcessImageContentForAlibabaDashScope,
  streamWithThink,
} from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
@@ -15,12 +18,14 @@ import {
  LLMModel,
  SpeechOptions,
  MultimodalContent,
  MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
  isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content:
        v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
      },
    };

    const visionModel = isVisionModel(options.config.model);

    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = (
        visionModel
          ? await preProcessImageContentForAlibabaDashScope(v.content)
          : v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : getMessageTextContent(v)
      ) as any;

      messages.push({ role: v.role, content });
    }

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      model: modelConfig.model,
@@ -129,7 +141,7 @@ export class QwenApi implements LLMApi {
        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
      };

      const chatPath = this.path(Alibaba.ChatPath);
      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@ export class QwenApi implements LLMApi {
            const json = JSON.parse(text);
            const choices = json.output.choices as Array<{
              message: {
                content: string | null;
                content: string | null | MultimodalContentForAlibaba[];
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
@@ -212,7 +224,9 @@ export class QwenApi implements LLMApi {
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
                content: Array.isArray(content)
                  ? content.map((item) => item.text).join(",")
                  : content,
              };
            }

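In the streaming branch above, a delta's `content` may now be either a plain string or an array of DashScope parts; the join keeps the downstream UI path string-only. A condensed, standalone sketch of that normalization (the function name is mine):

```ts
interface AlibabaPart {
  text?: string;
  image?: string;
}

// Flatten a DashScope delta into display text; array parts are joined
// with commas, mirroring the patched stream handler's behavior.
function normalizeAlibabaContent(
  content: string | null | AlibabaPart[],
): string {
  if (content == null) return "";
  return Array.isArray(content)
    ? content.map((item) => item.text).join(",")
    : content;
}
```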
@@ -198,7 +198,8 @@ export class ChatGPTApi implements LLMApi {
    const isDalle3 = _isDalle3(options.config.model);
    const isO1OrO3 =
      options.config.model.startsWith("o1") ||
      options.config.model.startsWith("o3");
      options.config.model.startsWith("o3") ||
      options.config.model.startsWith("o4-mini");
    if (isDalle3) {
      const prompt = getMessageTextContent(
        options.messages.slice(-1)?.pop() as any,
@@ -243,7 +244,7 @@ export class ChatGPTApi implements LLMApi {
      }

      // add max_tokens to vision model
      if (visionModel) {
      if (visionModel && !isO1OrO3) {
        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
      }
    }

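The added `!isO1OrO3` guard matters because OpenAI's o-series reasoning models reject the legacy `max_tokens` parameter (they expect `max_completion_tokens` instead), so the 4000-token vision floor must be skipped for them. A reduced sketch of the branch, with a hypothetical payload type:

```ts
type ChatPayload = { model: string; max_tokens?: number };

function applyVisionMaxTokens(
  payload: ChatPayload,
  visionModel: boolean,
  configuredMax: number,
) {
  const isReasoningModel =
    payload.model.startsWith("o1") ||
    payload.model.startsWith("o3") ||
    payload.model.startsWith("o4-mini");
  // Vision models get a floor of 4000 output tokens, except o-series
  // models, which do not accept `max_tokens` at all.
  if (visionModel && !isReasoningModel) {
    payload.max_tokens = Math.max(configuredMax, 4000);
  }
}
```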
@@ -221,7 +221,12 @@ export const ByteDance = {

export const Alibaba = {
  ExampleEndpoint: ALIBABA_BASE_URL,
  ChatPath: "v1/services/aigc/text-generation/generation",
  ChatPath: (modelName: string) => {
    if (modelName.includes("vl") || modelName.includes("omni")) {
      return "v1/services/aigc/multimodal-generation/generation";
    }
    return `v1/services/aigc/text-generation/generation`;
  },
};

export const Tencent = {
@@ -412,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
  "gpt-4-turbo": "2023-12",
  "gpt-4-turbo-2024-04-09": "2023-12",
  "gpt-4-turbo-preview": "2023-12",
  "gpt-4.1": "2024-06",
  "gpt-4.1-2025-04-14": "2024-06",
  "gpt-4.1-mini": "2024-06",
  "gpt-4.1-mini-2025-04-14": "2024-06",
  "gpt-4.1-nano": "2024-06",
  "gpt-4.1-nano-2025-04-14": "2024-06",
  "gpt-4.5-preview": "2023-10",
  "gpt-4.5-preview-2025-02-27": "2023-10",
  "gpt-4o": "2023-10",
  "gpt-4o-2024-05-13": "2023-10",
  "gpt-4o-2024-08-06": "2023-10",
@@ -453,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [
export const VISION_MODEL_REGEXES = [
  /vision/,
  /gpt-4o/,
  /gpt-4\.1/,
  /claude-3/,
  /gemini-1\.5/,
  /gemini-exp/,
@@ -464,6 +478,8 @@ export const VISION_MODEL_REGEXES = [
  /^dall-e-3$/, // Matches exactly "dall-e-3"
  /glm-4v/,
  /vl/i,
  /o3/,
  /o4-mini/,
];

export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -480,6 +496,14 @@ const openaiModels = [
  "gpt-4-32k-0613",
  "gpt-4-turbo",
  "gpt-4-turbo-preview",
  "gpt-4.1",
  "gpt-4.1-2025-04-14",
  "gpt-4.1-mini",
  "gpt-4.1-mini-2025-04-14",
  "gpt-4.1-nano",
  "gpt-4.1-nano-2025-04-14",
  "gpt-4.5-preview",
  "gpt-4.5-preview-2025-02-27",
  "gpt-4o",
  "gpt-4o-2024-05-13",
  "gpt-4o-2024-08-06",
@@ -494,6 +518,8 @@ const openaiModels = [
  "o1-mini",
  "o1-preview",
  "o3-mini",
  "o3",
  "o4-mini",
];

const googleModels = [
@@ -520,6 +546,7 @@ const googleModels = [
  "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.0-pro-exp",
  "gemini-2.0-pro-exp-02-05",
  "gemini-2.5-pro-preview-06-05",
];

const anthropicModels = [
@@ -570,6 +597,9 @@ const alibabaModes = [
  "qwen-max-0403",
  "qwen-max-0107",
  "qwen-max-longcontext",
  "qwen-omni-turbo",
  "qwen-vl-plus",
  "qwen-vl-max",
];

const tencentModels = [

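Two effects of the constant changes are worth spelling out: `Alibaba.ChatPath` is now a function that routes `vl`/`omni` models to DashScope's multimodal endpoint, and the widened regex list makes `o3`, `o4-mini`, and `gpt-4.1` register as vision-capable. A quick check, assuming the exports above (the combined predicate mirrors `isVisionModel` from app/utils):

```ts
import {
  Alibaba,
  VISION_MODEL_REGEXES,
  EXCLUDE_VISION_MODEL_REGEXES,
} from "@/app/constant";

// Endpoint routing by model name.
const vlPath = Alibaba.ChatPath("qwen-vl-plus");
// "v1/services/aigc/multimodal-generation/generation"
const textPath = Alibaba.ChatPath("qwen-max");
// "v1/services/aigc/text-generation/generation"

// Vision detection: some inclusion regex matches and no exclusion does.
const isVision = (model: string) =>
  VISION_MODEL_REGEXES.some((r) => r.test(model)) &&
  !EXCLUDE_VISION_MODEL_REGEXES.some((r) => r.test(model));

isVision("o4-mini"); // true
isVision("claude-3-5-haiku-20241022"); // false (explicitly excluded)
```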
@@ -614,13 +614,13 @@ export const useChatStore = createPersistStore(
          : shortTermMemoryStartIndex;
        // and if user has cleared history messages, we should exclude the memory too.
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;
        // const maxTokenThreshold = modelConfig.max_tokens;

        // get recent messages as much as possible
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex ;//&& tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];

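With the threshold commented out, the history window is bounded only by the clear-context/memory start index rather than by `modelConfig.max_tokens`, which governs output length, not context length. The loop now reduces to, in effect (a distilled sketch, not the store code itself):

```ts
// Collect messages newest-first down to the context start index,
// no longer stopping early at a token budget.
function collectRecentMessages<T>(
  messages: T[],
  contextStartIndex: number,
): T[] {
  const reversedRecentMessages: T[] = [];
  for (let i = messages.length - 1; i >= contextStartIndex; i -= 1) {
    reversedRecentMessages.push(messages[i]);
  }
  return reversedRecentMessages;
}
```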
@@ -3,7 +3,7 @@ import {
  UPLOAD_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { RequestMessage } from "@/app/client/api";
import { MultimodalContent, RequestMessage } from "@/app/client/api";
import Locale from "@/app/locales";
import {
  EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
  });
}

export async function preProcessImageContent(
export async function preProcessImageContentBase(
  content: RequestMessage["content"],
  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
) {
  if (typeof content === "string") {
    return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
    if (part?.type == "image_url" && part?.image_url?.url) {
      try {
        const url = await cacheImageToBase64Image(part?.image_url?.url);
        result.push({ type: part.type, image_url: { url } });
        result.push(await transformImageUrl(url));
      } catch (error) {
        console.error("Error processing image URL:", error);
      }
@@ -92,6 +93,23 @@ export async function preProcessImageContent(
  return result;
}

export async function preProcessImageContent(
  content: RequestMessage["content"],
) {
  return preProcessImageContentBase(content, async (url) => ({
    type: "image_url",
    image_url: { url },
  })) as Promise<MultimodalContent[] | string>;
}

export async function preProcessImageContentForAlibabaDashScope(
  content: RequestMessage["content"],
) {
  return preProcessImageContentBase(content, async (url) => ({
    image: url,
  }));
}

const imageCaches: Record<string, string> = {};
export function cacheImageToBase64Image(imageUrl: string) {
  if (imageUrl.includes(CACHE_URL_PREFIX)) {

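The refactor threads a provider-specific `transformImageUrl` callback through one shared walker, so the two wrappers differ only in the image part shape they emit. A usage sketch (the sample content is illustrative; both calls cache and base64-encode the image first):

```ts
import { MultimodalContent } from "@/app/client/api";
import {
  preProcessImageContent,
  preProcessImageContentForAlibabaDashScope,
} from "@/app/utils/chat";

async function demo() {
  const content: MultimodalContent[] = [
    { type: "text", text: "What is in this image?" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ];

  // OpenAI shape: { type: "image_url", image_url: { url: "<base64 data URL>" } }
  const openaiParts = await preProcessImageContent(content);

  // DashScope shape: { image: "<base64 data URL>" }
  const dashScopeParts =
    await preProcessImageContentForAlibabaDashScope(content);
}
```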
@@ -15,6 +15,8 @@ const config: Config = {
  moduleNameMapper: {
    "^@/(.*)$": "<rootDir>/$1",
  },
  extensionsToTreatAsEsm: [".ts", ".tsx"],
  injectGlobals: true,
};

// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async

@@ -1,24 +1,22 @@
// Learn more: https://github.com/testing-library/jest-dom
import "@testing-library/jest-dom";
import { jest } from "@jest/globals";

global.fetch = jest.fn(() =>
  Promise.resolve({
    ok: true,
    status: 200,
    json: () => Promise.resolve({}),
    json: () => Promise.resolve([]),
    headers: new Headers(),
    redirected: false,
    statusText: "OK",
    type: "basic",
    url: "",
    clone: function () {
      return this;
    },
    body: null,
    bodyUsed: false,
    arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
    blob: () => Promise.resolve(new Blob()),
    formData: () => Promise.resolve(new FormData()),
    text: () => Promise.resolve(""),
  }),
  } as Response),
);

@@ -17,8 +17,8 @@
    "prompts": "node ./scripts/fetch-prompts.mjs",
    "prepare": "husky install",
    "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
    "test": "jest --watch",
    "test:ci": "jest --ci"
    "test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
    "test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
  },
  "dependencies": {
    "@fortaine/fetch-event-source": "^3.0.6",

@@ -1,3 +1,4 @@
import { jest } from "@jest/globals";
import { isVisionModel } from "../app/utils";

describe("isVisionModel", () => {