Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
Synced 2025-11-04 08:13:43 +08:00

Compare commits (29 commits)

Latest commit: Leizhenpen · aa34e43a5e
| SHA1 |
|---|
| aa34e43a5e |
| 3809375694 |
| 1b0de25986 |
| 865c45dd29 |
| 1f5d8e6d9c |
| c9ef6d58ed |
| 2d7229d2b8 |
| 11b37c15bd |
| 1d0038f17d |
| 619fa519c0 |
| 48469bd8ca |
| 5a5e887f2b |
| b6f5d75656 |
| 0d41a17ef6 |
| f7cde17919 |
| 570cbb34b6 |
| 7aa9ae0a3e |
| 2d4180f5be |
| 9f0182b55e |
| ad6666eeaf |
| a2c4e468a0 |
| 2167076652 |
| e123076250 |
| ebcb4db245 |
| 0a25a1a8cb |
| f3154b20a5 |
| b709ee3983 |
| f5f3ce94f6 |
| b05b2e78cd |

README.md (36 changes)
```diff
@@ -7,7 +7,7 @@
 
 
-<h1 align="center">NextChat (ChatGPT Next Web)</h1>
+<h1 align="center">NextChat</h1>
 
 English / [简体中文](./README_CN.md)
 
@@ -22,8 +22,7 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
 
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
-[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
 
 
 [saas-url]: https://nextchat.club?utm_source=readme
@@ -41,31 +40,14 @@ English / [简体中文](./README_CN.md)
 
 </div>
 
-## 👋 Hey, NextChat is going to develop a native app!
+## 🥳 Cheer for NextChat iOS Version Online!
+> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
 
-> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!
+> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
 
-✨ Several key points:
-
-- Starting from 0, you are a veteran
-- Completely open source, not hidden
-- Native development, pursuing the ultimate experience
-
-Will you come and do something together? 😎
-
-https://github.com/ChatGPTNextWeb/NextChat/issues/6269
-
-#Seeking for talents is thirsty #lack of people
-
-
-## 🥳 Cheer for DeepSeek, China's AI star!
- > Purpose-Built UI for DeepSeek Reasoner Model
-
-<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
-
 ## 🫣 NextChat Support MCP  ! 
 > Before build, please set env ENABLE_MCP=true
 
@@ -130,7 +112,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
 - 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
 - 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
 - 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
-- 🚀 v2.14.0 Now supports  Artifacts & SD 
+- 🚀 v2.14.0 Now supports  Artifacts & SD.
 - 🚀 v2.10.1 support Google Gemini Pro model.
 - 🚀 v2.9.11 you can use azure endpoint now.
 - 🚀 v2.8 now we have a client that runs across all platforms!
@@ -338,7 +320,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
 
 ### `DEFAULT_MODEL` (optional)
 
-Change default model
+Change default model.
 
 ### `VISION_MODELS` (optional)
 
@@ -369,7 +351,7 @@ Customize Stability API url.
 
 ### `ENABLE_MCP` (optional)
 
-Enable MCP(Model Context Protocol)Feature
+Enable MCP(Model Context Protocol)Feature.
 
 ### `SILICONFLOW_API_KEY` (optional)
 
```

app/client/api.ts

```diff
@@ -40,6 +40,11 @@ export interface MultimodalContent {
   };
 }
 
+export interface MultimodalContentForAlibaba {
+  text?: string;
+  image?: string;
+}
+
 export interface RequestMessage {
   role: MessageRole;
   content: string | MultimodalContent[];
```
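
For context, a minimal sketch of how the new interface is meant to be populated; the message values below are illustrative, not taken from the commit:

```ts
import { MultimodalContentForAlibaba } from "@/app/client/api";

// Illustrative DashScope-style multimodal content: each part carries either
// a text fragment or an image reference (URL or Base64 data URL).
const parts: MultimodalContentForAlibaba[] = [
  { image: "data:image/png;base64,iVBORw0KGgo..." }, // truncated sample data
  { text: "What is shown in this picture?" },
];
```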

QwenApi client (app/client/platforms/alibaba.ts)

```diff
@@ -7,7 +7,10 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -15,12 +18,14 @@ import {
   LLMModel,
   SpeechOptions,
   MultimodalContent,
+  MultimodalContentForAlibaba,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   getTimeoutMSByModel,
+  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));
-
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
       },
     };
 
+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v)
+      ) as any;
+
+      messages.push({ role: v.role, content });
+    }
+
     const shouldStream = !!options.config.stream;
     const requestPayload: RequestPayload = {
       model: modelConfig.model,
@@ -129,7 +141,7 @@ export class QwenApi implements LLMApi {
         "X-DashScope-SSE": shouldStream ? "enable" : "disable",
       };
 
-      const chatPath = this.path(Alibaba.ChatPath);
+      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@ export class QwenApi implements LLMApi {
             const json = JSON.parse(text);
             const choices = json.output.choices as Array<{
               message: {
-                content: string | null;
+                content: string | null | MultimodalContentForAlibaba[];
                 tool_calls: ChatMessageTool[];
                 reasoning_content: string | null;
               };
@@ -212,7 +224,9 @@ export class QwenApi implements LLMApi {
             } else if (content && content.length > 0) {
               return {
                 isThinking: false,
-                content: content,
+                content: Array.isArray(content)
+                  ? content.map((item) => item.text).join(",")
+                  : content,
               };
             }
 
```
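
Pulled out of the hunk above, the per-message branching reads as follows. A sketch only; the helper name (`toDashScopeContent`) is mine rather than the commit's:

```ts
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  isVisionModel,
} from "@/app/utils";
import { preProcessImageContentForAlibabaDashScope } from "@/app/utils/chat";
import type { RequestMessage } from "@/app/client/api";

// Sketch: how QwenApi.chat now chooses a content shape per message.
async function toDashScopeContent(v: RequestMessage, model: string) {
  if (isVisionModel(model)) {
    // vl/omni models: image_url parts are rewritten into DashScope { image } parts
    return preProcessImageContentForAlibabaDashScope(v.content);
  }
  // text-only models: assistant turns drop <think> reasoning, others keep plain text
  return v.role === "assistant"
    ? getMessageTextContentWithoutThinking(v)
    : getMessageTextContent(v);
}
```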

DeepSeekApi client (app/client/platforms/deepseek.ts)

```diff
@@ -75,6 +75,25 @@ export class DeepSeekApi implements LLMApi {
       }
     }
 
+    // Detect and repair the message order: ensure the first non-system message is a user message
+    const filteredMessages: ChatOptions["messages"] = [];
+    let hasFoundFirstUser = false;
+
+    for (const msg of messages) {
+      if (msg.role === "system") {
+        // Keep all system messages
+        filteredMessages.push(msg);
+      } else if (msg.role === "user") {
+        // User messages are added directly
+        filteredMessages.push(msg);
+        hasFoundFirstUser = true;
+      } else if (hasFoundFirstUser) {
+        // After the first user message, all subsequent non-system messages are retained
+        filteredMessages.push(msg);
+      }
+      // If hasFoundFirstUser is false and the message is not a system message, it is skipped
+    }
+
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -85,7 +104,7 @@ export class DeepSeekApi implements LLMApi {
     };
 
     const requestPayload: RequestPayload = {
-      messages,
+      messages: filteredMessages,
       stream: options.config.stream,
       model: modelConfig.model,
       temperature: modelConfig.temperature,
```
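
Read as a pure function, the filter amounts to this (the helper name is mine; the commit inlines the loop). The commit's comment indicates the DeepSeek API expects the first non-system message to be a user turn:

```ts
type Role = "system" | "user" | "assistant";

// Sketch: drop assistant messages that precede the first user message,
// keeping system messages wherever they appear.
function fixMessageOrder<T extends { role: Role }>(messages: T[]): T[] {
  const out: T[] = [];
  let seenUser = false;
  for (const m of messages) {
    if (m.role === "user") seenUser = true;
    if (m.role === "system" || m.role === "user" || seenUser) out.push(m);
  }
  return out;
}

// Example: [system, assistant, user, assistant] -> [system, user, assistant]
```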

ChatGPTApi client (app/client/platforms/openai.ts)

```diff
@@ -198,7 +198,8 @@ export class ChatGPTApi implements LLMApi {
     const isDalle3 = _isDalle3(options.config.model);
     const isO1OrO3 =
       options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+      options.config.model.startsWith("o3") ||
+      options.config.model.startsWith("o4-mini");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -243,7 +244,7 @@ export class ChatGPTApi implements LLMApi {
       }
 
       // add max_tokens to vision model
-      if (visionModel) {
+      if (visionModel && !isO1OrO3) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
```
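
For reference, the widened check behaves like this; `isReasoningSeries` is my name for the inline expression:

```ts
// Sketch of the predicate guarding the max_tokens override above.
const isReasoningSeries = (model: string) =>
  model.startsWith("o1") ||
  model.startsWith("o3") ||
  model.startsWith("o4-mini");

isReasoningSeries("o4-mini"); // true: max_tokens is not forced for vision input
isReasoningSeries("gpt-4o");  // false: vision requests still get max_tokens >= 4000
```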

Avatar component (app/components/emoji.tsx)

```diff
@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconGemma;
     } else if (modelName.startsWith("claude")) {
       LlmIcon = BotIconClaude;
-    } else if (modelName.toLowerCase().includes("llama")) {
+    } else if (modelName.includes("llama")) {
       LlmIcon = BotIconMeta;
-    } else if (modelName.startsWith("mixtral")) {
+    } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
       LlmIcon = BotIconMistral;
-    } else if (modelName.toLowerCase().includes("deepseek")) {
+    } else if (modelName.includes("deepseek")) {
       LlmIcon = BotIconDeepseek;
     } else if (modelName.startsWith("moonshot")) {
       LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
       LlmIcon = BotIconDoubao;
     } else if (
-      modelName.toLowerCase().includes("glm") ||
+      modelName.includes("glm") ||
       modelName.startsWith("cogview-") ||
       modelName.startsWith("cogvideox-")
     ) {
```

app/constant.ts

```diff
@@ -221,7 +221,12 @@ export const ByteDance = {
 
 export const Alibaba = {
   ExampleEndpoint: ALIBABA_BASE_URL,
-  ChatPath: "v1/services/aigc/text-generation/generation",
+  ChatPath: (modelName: string) => {
+    if (modelName.includes("vl") || modelName.includes("omni")) {
+      return "v1/services/aigc/multimodal-generation/generation";
+    }
+    return `v1/services/aigc/text-generation/generation`;
+  },
 };
 
 export const Tencent = {
@@ -412,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-turbo": "2023-12",
   "gpt-4-turbo-2024-04-09": "2023-12",
   "gpt-4-turbo-preview": "2023-12",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-2025-04-14": "2024-06",
+  "gpt-4.1-mini": "2024-06",
+  "gpt-4.1-mini-2025-04-14": "2024-06",
+  "gpt-4.1-nano": "2024-06",
+  "gpt-4.1-nano-2025-04-14": "2024-06",
+  "gpt-4.5-preview": "2023-10",
+  "gpt-4.5-preview-2025-02-27": "2023-10",
   "gpt-4o": "2023-10",
   "gpt-4o-2024-05-13": "2023-10",
   "gpt-4o-2024-08-06": "2023-10",
@@ -453,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
@@ -464,6 +478,8 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
   /vl/i,
+  /o3/,
+  /o4-mini/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -480,6 +496,14 @@ const openaiModels = [
   "gpt-4-32k-0613",
   "gpt-4-turbo",
   "gpt-4-turbo-preview",
+  "gpt-4.1",
+  "gpt-4.1-2025-04-14",
+  "gpt-4.1-mini",
+  "gpt-4.1-mini-2025-04-14",
+  "gpt-4.1-nano",
+  "gpt-4.1-nano-2025-04-14",
+  "gpt-4.5-preview",
+  "gpt-4.5-preview-2025-02-27",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
@@ -494,6 +518,8 @@ const openaiModels = [
   "o1-mini",
   "o1-preview",
   "o3-mini",
+  "o3",
+  "o4-mini",
 ];
 
 const googleModels = [
@@ -535,6 +561,8 @@ const anthropicModels = [
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
+  "claude-3-7-sonnet-20250219",
+  "claude-3-7-sonnet-latest",
 ];
 
 const baiduModels = [
@@ -568,6 +596,9 @@ const alibabaModes = [
   "qwen-max-0403",
   "qwen-max-0107",
   "qwen-max-longcontext",
+  "qwen-omni-turbo",
+  "qwen-vl-plus",
+  "qwen-vl-max",
 ];
 
 const tencentModels = [
```
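
Taken together, the new `ChatPath` routing and the extended vision regexes mean a model name alone decides both the endpoint and the preprocessing path. An illustrative check (model names are examples):

```ts
import { Alibaba } from "@/app/constant";
import { isVisionModel } from "@/app/utils";

// Endpoint routing: "vl"/"omni" model names go to the multimodal endpoint.
console.log(Alibaba.ChatPath("qwen-vl-max"));     // v1/services/aigc/multimodal-generation/generation
console.log(Alibaba.ChatPath("qwen-omni-turbo")); // v1/services/aigc/multimodal-generation/generation
console.log(Alibaba.ChatPath("qwen-max"));        // v1/services/aigc/text-generation/generation

// Vision detection: /vl/i already catches the Qwen VL names, and the new
// /gpt-4\.1/, /o3/ and /o4-mini/ entries mark the 2025 OpenAI models.
console.log(isVisionModel("qwen-vl-plus")); // true
console.log(isVisionModel("gpt-4.1-mini")); // true
```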

app/utils/chat.ts

```diff
@@ -3,7 +3,7 @@ import {
   UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
 import {
   EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
   });
 }
 
-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -92,6 +93,23 @@ export async function preProcessImageContent(
   return result;
 }
 
+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}
+
 const imageCaches: Record<string, string> = {};
 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
```
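
The split keeps the walk-and-cache logic in one place and moves only the output shape into a callback. A sketch of what the two wrappers return for the same input (the URL is illustrative):

```ts
import {
  preProcessImageContent,
  preProcessImageContentForAlibabaDashScope,
} from "@/app/utils/chat";

async function demo() {
  const content = [
    { type: "text", text: "Describe this image." },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ] as any;

  // OpenAI-style parts: [..., { type: "image_url", image_url: { url } }]
  // (url is the cached Base64 form produced by cacheImageToBase64Image)
  const openaiShaped = await preProcessImageContent(content);

  // DashScope-style parts: [..., { image: url }], matching MultimodalContentForAlibaba
  const dashScopeShaped = await preProcessImageContentForAlibabaDashScope(content);

  return { openaiShaped, dashScopeShaped };
}
```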

jest.config.ts

```diff
@@ -15,6 +15,8 @@ const config: Config = {
   moduleNameMapper: {
     "^@/(.*)$": "<rootDir>/$1",
   },
+  extensionsToTreatAsEsm: [".ts", ".tsx"],
+  injectGlobals: true,
 };
 
 // createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async
```
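
In context, the two options land in the existing `next/jest`-based config. A sketch of the resulting file, assuming the surrounding fields (`coverageProvider`, `testEnvironment`) follow the standard next/jest template:

```ts
import type { Config } from "jest";
import nextJest from "next/jest.js";

const createJestConfig = nextJest({ dir: "./" });

const config: Config = {
  coverageProvider: "v8",   // assumed from the standard next/jest template
  testEnvironment: "jsdom", // assumed likewise
  moduleNameMapper: {
    "^@/(.*)$": "<rootDir>/$1",
  },
  // New: treat TS sources as native ES modules; pairs with the
  // --experimental-vm-modules flags added to the test scripts in package.json.
  extensionsToTreatAsEsm: [".ts", ".tsx"],
  // New: keep describe/test/expect injected as globals under ESM mode.
  injectGlobals: true,
};

// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async
export default createJestConfig(config);
```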

jest.setup.ts

```diff
@@ -1,24 +1,22 @@
 // Learn more: https://github.com/testing-library/jest-dom
 import "@testing-library/jest-dom";
+import { jest } from "@jest/globals";
 
 global.fetch = jest.fn(() =>
   Promise.resolve({
     ok: true,
     status: 200,
-    json: () => Promise.resolve({}),
+    json: () => Promise.resolve([]),
     headers: new Headers(),
     redirected: false,
     statusText: "OK",
     type: "basic",
     url: "",
-    clone: function () {
-      return this;
-    },
     body: null,
     bodyUsed: false,
     arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
     blob: () => Promise.resolve(new Blob()),
     formData: () => Promise.resolve(new FormData()),
     text: () => Promise.resolve(""),
-  }),
+  } as Response),
 );
```

package.json

```diff
@@ -17,8 +17,8 @@
     "prompts": "node ./scripts/fetch-prompts.mjs",
     "prepare": "husky install",
     "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
-    "test": "jest --watch",
-    "test:ci": "jest --ci"
+    "test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
+    "test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
   },
   "dependencies": {
     "@fortaine/fetch-event-source": "^3.0.6",
```

test/vision-model-checker.test.ts

```diff
@@ -1,3 +1,4 @@
+import { jest } from "@jest/globals";
 import { isVisionModel } from "../app/utils";
 
 describe("isVisionModel", () => {
@@ -50,7 +51,7 @@ describe("isVisionModel", () => {
 
   test("should identify models from VISION_MODELS env var", () => {
     process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
-    
+
     expect(isVisionModel("custom-vision-model")).toBe(true);
     expect(isVisionModel("another-vision-model")).toBe(true);
     expect(isVisionModel("unrelated-model")).toBe(false);
@@ -64,4 +65,4 @@ describe("isVisionModel", () => {
     expect(isVisionModel("unrelated-model")).toBe(false);
     expect(isVisionModel("gpt-4-vision")).toBe(true);
   });
 });
```