Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git, synced 2025-11-04 08:13:43 +08:00

# Compare commits: f2a5af7556...dependabot (21 commits)
Commits in this comparison (author and date columns did not survive extraction):

- f9d3c93633
- 673f907ea4
- fb3af2a08f
- eb193ac0ff
- c30ddfbb07
- a2f0149786
- 03d36f96ed
- 705dffc664
- 02f7e6de98
- 843dc52efa
- 3809375694
- 1b0de25986
- 865c45dd29
- 1f5d8e6d9c
- c9ef6d58ed
- 2d7229d2b8
- 11b37c15bd
- 1d0038f17d
- 619fa519c0
- c261ebc82c
- f7c747c65f
## .gitignore (vendored, 1 line changed)
```diff
@@ -49,4 +49,3 @@ masks.json
 
 # mcp config
 app/mcp/mcp_config.json
-Dockerfile.local
```
## README.md (17 lines changed)
```diff
@@ -22,12 +22,12 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
 
-[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.club) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
 
 
 [saas-url]: https://nextchat.club?utm_source=readme
 [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
-[web-url]: https://app.nextchat.dev/
+[web-url]: https://app.nextchat.club/
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
 [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
 [Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
@@ -40,13 +40,14 @@ English / [简体中文](./README_CN.md)
 
 </div>
 
-## 🥳 Cheer for DeepSeek, China's AI star!
-> Purpose-Built UI for DeepSeek Reasoner Model
+## 🥳 Cheer for NextChat iOS Version Online!
+> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
+
+> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
 
 <img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
 
 ## 🫣 NextChat Support MCP  ! 
 > Before build, please set env ENABLE_MCP=true
 
```
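The MCP note above is a build-time toggle. A minimal sketch of how such a flag can be consumed (the helper name `isMcpEnabled` is illustrative, not necessarily NextChat's actual API):

```ts
// Hypothetical helper: gate MCP features on the ENABLE_MCP env flag.
// The README asks for the flag to be set before building, so it should be
// present in the build environment as well as at runtime.
export function isMcpEnabled(): boolean {
  return process.env.ENABLE_MCP === "true";
}
```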
## app/client/platforms/anthropic.ts

```diff
@@ -71,6 +71,8 @@ const ClaudeMapper = {
   system: "user",
 } as const;
 
 const keys = ["claude-2, claude-instant-1"];
 
 export class ClaudeApi implements LLMApi {
+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
```
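The hunk stubs out TTS so the Anthropic client still satisfies the `LLMApi` interface. A self-contained sketch of the same pattern, with simplified stand-ins for the repo's types:

```ts
// Simplified stand-ins for the repo's SpeechOptions/LLMApi shapes.
interface SpeechOptions {
  model: string;
  input: string;
}

interface LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer>;
}

// A provider without TTS support fails fast instead of returning silence,
// so callers surface the missing capability immediately.
class NoTtsProvider implements LLMApi {
  speech(_options: SpeechOptions): Promise<ArrayBuffer> {
    return Promise.reject(new Error("Method not implemented."));
  }
}
```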
## app/client/platforms/google.ts

```diff
@@ -197,6 +197,8 @@ export class GeminiProApi implements LLMApi {
       signal: controller.signal,
       headers: getHeaders(),
     };
 
+      const isThinking = options.config.model.includes("-thinking");
+
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
```
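This hunk sits inside the abort pattern all of these clients share: compute a per-model timeout, then cancel the fetch if the model has not responded in time. A minimal sketch (the timeout value would come from `getTimeoutMSByModel`, shown in the app/utils.ts diff below):

```ts
// Abort the request if the model has not responded within its budget.
async function fetchWithTimeout(
  url: string,
  init: RequestInit,
  timeoutMs: number,
): Promise<Response> {
  const controller = new AbortController();
  const requestTimeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { ...init, signal: controller.signal });
  } finally {
    // Always clear the timer, whether the request resolved or threw.
    clearTimeout(requestTimeoutId);
  }
}
```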
## app/client/platforms/openai.ts

```diff
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
```
```diff
@@ -67,8 +67,6 @@ export interface RequestPayload {
   top_p: number;
   max_tokens?: number;
   max_completion_tokens?: number;
-  reasoning_effort?: string;
-  // O3 only
 }
 
 export interface DalleRequestPayload {
```
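The widened `role` union allows a leading developer instruction in the message list. A quick illustration of a payload that now type-checks:

```ts
type Role = "developer" | "system" | "user" | "assistant";

interface ChatMessage {
  role: Role;
  content: string;
}

// "developer" messages carry instructions for o-series reasoning models,
// e.g. the "Formatting re-enabled" hint added later in this diff.
const messages: ChatMessage[] = [
  { role: "developer", content: "Formatting re-enabled" },
  { role: "user", content: "Summarize this repo in two sentences." },
];
```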
```diff
@@ -198,9 +196,10 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1 = options.config.model.startsWith("o1");
-    const isO3 = options.config.model.startsWith("o3");
-    const isO1OrO3 = isO1 || isO3;
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3") ||
+      options.config.model.startsWith("o4-mini");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
```
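The refactor folds the separate `isO1`/`isO3` flags into one prefix test that also covers o4-mini. Equivalent standalone logic:

```ts
// True for o1*, o3* (including o3-mini), and o4-mini* model names.
function isOSeriesModel(model: string): boolean {
  return ["o1", "o3", "o4-mini"].some((prefix) => model.startsWith(prefix));
}

console.log(isOSeriesModel("o1-preview")); // true
console.log(isOSeriesModel("o3-mini")); // true
console.log(isOSeriesModel("o4-mini")); // true
console.log(isOSeriesModel("gpt-4o")); // false: no matching prefix
```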
```diff
@@ -239,23 +238,22 @@ export class ChatGPTApi implements LLMApi {
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
-      // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       if (isO1OrO3) {
+        // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+        // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+        // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+        requestPayload["messages"].unshift({
+          role: "developer",
+          content: "Formatting re-enabled",
+        });
+
+        // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }
 
-      if (isO3) {
-        requestPayload["reasoning_effort"] = "high";
-        // make o3-mini defaults to high reasoning effort
-      }
-
       // add max_tokens to vision model
-      if (visionModel) {
-        if (isO1) {
-          requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
-        } else {
-          requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
-        }
+      if (visionModel && !isO1OrO3) {
+        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
```
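Condensed, the new o-series branch does two things: prepend a `developer` message so the model emits markdown again, and budget tokens via `max_completion_tokens` instead of `max_tokens`. A self-contained sketch under those assumptions, with a plain object standing in for the repo's `RequestPayload`/`modelConfig` types:

```ts
interface DraftPayload {
  messages: { role: string; content: string }[];
  max_tokens?: number;
  max_completion_tokens?: number;
}

// Apply the o-series rules from the hunk above to a draft payload.
function applyOSeriesRules(
  payload: DraftPayload,
  maxTokens: number,
  isO1OrO3: boolean,
  visionModel: boolean,
): DraftPayload {
  if (isO1OrO3) {
    // Reasoning models default to plain text; the developer message
    // asks them to emit markdown again.
    payload.messages.unshift({ role: "developer", content: "Formatting re-enabled" });
    // The token budget goes through max_completion_tokens, not max_tokens.
    payload.max_completion_tokens = maxTokens;
  }
  // Vision models still use max_tokens, but o-series models are skipped.
  if (visionModel && !isO1OrO3) {
    payload.max_tokens = Math.max(maxTokens, 4000);
  }
  return payload;
}
```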
```diff
@@ -297,11 +295,6 @@ export class ChatGPTApi implements LLMApi {
           isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
         );
       }
-      // make a fetch request
-      const requestTimeoutId = setTimeout(
-        () => controller.abort(),
-        getTimeoutMSByModel(options.config.model),
-      );
       if (shouldStream) {
         let index = -1;
         const [tools, funcs] = usePluginStore
```
```diff
@@ -409,6 +402,12 @@ export class ChatGPTApi implements LLMApi {
           headers: getHeaders(),
         };
 
+        // make a fetch request
+        const requestTimeoutId = setTimeout(
+          () => controller.abort(),
+          getTimeoutMSByModel(options.config.model),
+        );
+
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);
```
							
								
								
									
## app/constant.ts (109 lines changed)
```diff
@@ -409,17 +409,42 @@ You are an AI assistant with access to system tools. Your role is to help users
 `;
 
 export const SUMMARIZE_MODEL = "gpt-4o-mini";
-export const GEMINI_SUMMARIZE_MODEL = "gemini-2.0-flash";
+export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
 export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";
 
 export const KnowledgeCutOffDate: Record<string, string> = {
-  default: "2023-10",
+  default: "2021-09",
+  "gpt-4-turbo": "2023-12",
+  "gpt-4-turbo-2024-04-09": "2023-12",
+  "gpt-4-turbo-preview": "2023-12",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-2025-04-14": "2024-06",
+  "gpt-4.1-mini": "2024-06",
+  "gpt-4.1-mini-2025-04-14": "2024-06",
+  "gpt-4.1-nano": "2024-06",
+  "gpt-4.1-nano-2025-04-14": "2024-06",
+  "gpt-4.5-preview": "2023-10",
+  "gpt-4.5-preview-2025-02-27": "2023-10",
+  "gpt-4o": "2023-10",
+  "gpt-4o-2024-05-13": "2023-10",
+  "gpt-4o-2024-08-06": "2023-10",
+  "gpt-4o-2024-11-20": "2023-10",
+  "chatgpt-4o-latest": "2023-10",
+  "gpt-4o-mini": "2023-10",
+  "gpt-4o-mini-2024-07-18": "2023-10",
+  "gpt-4-vision-preview": "2023-04",
+  "o1-mini-2024-09-12": "2023-10",
+  "o1-mini": "2023-10",
+  "o1-preview-2024-09-12": "2023-10",
+  "o1-preview": "2023-10",
+  "o1-2024-12-17": "2023-10",
+  o1: "2023-10",
+  "o3-mini-2025-01-31": "2023-10",
+  "o3-mini": "2023-10",
-  // After improvements,
-  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
-  "gemini-2.5-pro-exp-03-25": "2025-01",
-  "gemini-2.0-flash": "2024-08",
-  "claude-3-7-sonnet-latest": "2024-10",
-  "claude-3-5-haiku-latest": "2024-10",
   "gemini-pro": "2023-12",
   "gemini-pro-vision": "2023-12",
   "deepseek-chat": "2024-07",
   "deepseek-coder": "2024-07",
 };
```
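A map like this is typically consulted with a fallback to `default` for models that have no explicit entry. A small sketch of that lookup over a trimmed copy of the table (the helper name is illustrative):

```ts
const KnowledgeCutOffDate: Record<string, string> = {
  default: "2021-09",
  "gpt-4o": "2023-10",
  "deepseek-chat": "2024-07",
};

// Fall back to the default cutoff for models not listed explicitly.
function knowledgeCutoffFor(model: string): string {
  return KnowledgeCutOffDate[model] ?? KnowledgeCutOffDate.default;
}

console.log(knowledgeCutoffFor("gpt-4o")); // "2023-10"
console.log(knowledgeCutoffFor("some-new-model")); // "2021-09"
```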
```diff
@@ -441,11 +466,11 @@ export const DEFAULT_TTS_VOICES = [
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
   /gemini-2\.0/,
-  /gemini-2\.5-pro/,
   /learnlm/,
   /qwen-vl/,
   /qwen2-vl/,
@@ -453,7 +478,8 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
   /vl/i,
-  /o1/,
+  /o3/,
+  /o4-mini/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
```
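The include/exclude pair is evaluated together: a model counts as vision-capable when it matches an include pattern and no exclude pattern. A sketch of that combination with trimmed lists:

```ts
const VISION_MODEL_REGEXES = [/vision/, /gpt-4o/, /claude-3/, /o4-mini/];
const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

function matchesVision(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((regex) => regex.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((regex) => regex.test(model))
  );
}

console.log(matchesVision("claude-3-opus")); // true: matches /claude-3/
console.log(matchesVision("claude-3-5-haiku-20241022")); // false: matches /claude-3/ but is excluded
```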
```diff
@@ -462,29 +488,76 @@ const openaiModels = [
   // As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
   // as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
   "gpt-3.5-turbo",
   "gpt-3.5-turbo-1106",
   "gpt-3.5-turbo-0125",
   "gpt-4",
   "gpt-4-0613",
   "gpt-4-32k",
   "gpt-4-32k-0613",
   "gpt-4-turbo",
   "gpt-4-turbo-preview",
+  "gpt-4.1",
+  "gpt-4.1-2025-04-14",
+  "gpt-4.1-mini",
+  "gpt-4.1-mini-2025-04-14",
+  "gpt-4.1-nano",
+  "gpt-4.1-nano-2025-04-14",
+  "gpt-4.5-preview",
+  "gpt-4.5-preview-2025-02-27",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
   "gpt-4o-2024-11-20",
   "chatgpt-4o-latest",
   "gpt-4o-mini",
   "gpt-4o-mini-2024-07-18",
   "gpt-4-vision-preview",
   "gpt-4-turbo-2024-04-09",
   "gpt-4-1106-preview",
   "dall-e-3",
   "o1-mini",
   "o1-preview",
   "o1",
   "o3-mini",
-  "gpt-4.5-preview",
+  "o3",
+  "o4-mini",
 ];
 
 const googleModels = [
   "gemini-1.5-pro-latest",
   "gemini-1.5-pro",
   "gemini-1.5-pro-002",
   "gemini-1.5-flash-latest",
   "gemini-1.5-flash-8b-latest",
   "gemini-1.5-flash",
   "gemini-1.5-flash-8b",
   "gemini-1.5-flash-002",
   "learnlm-1.5-pro-experimental",
   "gemini-exp-1206",
   "gemini-2.0-flash",
-  "gemini-2.0-flash-lite",
-  "gemini-2.5-pro-exp-03-25",
   "gemini-2.0-flash-exp",
   "gemini-2.0-flash-lite-preview-02-05",
   "gemini-2.0-flash-thinking-exp",
   "gemini-2.0-flash-thinking-exp-1219",
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
+  "gemini-2.5-pro-preview-06-05",
 ];
 
 const anthropicModels = [
   "claude-instant-1.2",
   "claude-2.0",
   "claude-2.1",
   "claude-3-sonnet-20240229",
   "claude-3-opus-20240229",
   "claude-3-opus-latest",
   "claude-3-haiku-20240307",
   "claude-3-5-haiku-20241022",
   "claude-3-5-haiku-latest",
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
   "claude-3-7-sonnet-20250219",
   "claude-3-7-sonnet-latest",
 ];
```
```diff
@@ -555,6 +628,18 @@ const xAIModes = [
   "grok-2-vision-1212",
   "grok-2-vision",
   "grok-2-vision-latest",
+  "grok-3-mini-fast-beta",
+  "grok-3-mini-fast",
+  "grok-3-mini-fast-latest",
+  "grok-3-mini-beta",
+  "grok-3-mini",
+  "grok-3-mini-latest",
+  "grok-3-fast-beta",
+  "grok-3-fast",
+  "grok-3-fast-latest",
+  "grok-3-beta",
+  "grok-3",
+  "grok-3-latest",
 ];
 
 const chatglmModels = [
```
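Provider arrays like `openaiModels`, `googleModels`, and `xAIModes` are ultimately merged into one default model table. A hedged sketch of that assembly (field names are illustrative; the repo's actual `DEFAULT_MODELS` carries more metadata per entry):

```ts
const openaiModels = ["gpt-4.1", "gpt-4o", "o3", "o4-mini"];
const googleModels = ["gemini-2.0-flash", "gemini-2.5-pro-preview-06-05"];
const xAIModes = ["grok-3", "grok-3-mini"];

interface ModelEntry {
  name: string;
  available: boolean;
  provider: string;
}

// Flatten each provider's name list into a tagged model table.
const DEFAULT_MODELS: ModelEntry[] = [
  ...openaiModels.map((name) => ({ name, available: true, provider: "OpenAI" })),
  ...googleModels.map((name) => ({ name, available: true, provider: "Google" })),
  ...xAIModes.map((name) => ({ name, available: true, provider: "XAI" })),
];
```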
							
								
								
									
## app/utils.ts (13 lines changed)
```diff
@@ -304,18 +304,9 @@ export function getTimeoutMSByModel(model: string) {
     model.startsWith("o1") ||
     model.startsWith("o3") ||
     model.includes("deepseek-r") ||
-    model.includes("-thinking") ||
-    model.includes("pro")
-  ) {
-    console.log(
-      "thinking model is " +
-        model +
-        " timeout is " +
-        REQUEST_TIMEOUT_MS_FOR_THINKING,
-    );
+    model.includes("-thinking")
+  )
     return REQUEST_TIMEOUT_MS_FOR_THINKING;
-  }
-  console.log("normal model is " + model + " timeout is " + REQUEST_TIMEOUT_MS);
   return REQUEST_TIMEOUT_MS;
 }
```
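After the cleanup the helper reads as a two-tier timeout policy: reasoning/thinking models get a longer budget, everything else the standard one. A runnable version with illustrative constants (the repo defines its own `REQUEST_TIMEOUT_MS` values in app/constant.ts):

```ts
const REQUEST_TIMEOUT_MS = 60_000; // illustrative value
const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

export function getTimeoutMSByModel(model: string) {
  // Reasoning/thinking models get a longer budget before the fetch aborts.
  if (
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.includes("deepseek-r") ||
    model.includes("-thinking")
  )
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  return REQUEST_TIMEOUT_MS;
}
```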
							
								
								
									
## package-lock.json (generated, 17486 lines changed)

File diff suppressed because it is too large.
## package.json

```diff
@@ -83,7 +83,7 @@
     "jest": "^29.7.0",
     "jest-environment-jsdom": "^29.7.0",
     "lint-staged": "^13.2.2",
-    "prettier": "^3.0.2",
+    "prettier": "^3.6.2",
     "ts-node": "^10.9.2",
     "tsx": "^4.16.0",
     "typescript": "5.2.2",
```
## test/model-available.test.ts

```diff
@@ -3,7 +3,7 @@ import { isModelNotavailableInServer } from "../app/utils/model";
 describe("isModelNotavailableInServer", () => {
   test("test model will return false, which means the model is available", () => {
     const customModels = "";
-    const modelName = "gpt-4o";
+    const modelName = "gpt-4";
     const providerNames = "OpenAI";
     const result = isModelNotavailableInServer(
       customModels,
```
## test/vision-model-checker.test.ts

```diff
@@ -15,11 +15,10 @@ describe("isVisionModel", () => {
 
   test("should identify vision models using regex patterns", () => {
     const visionModels = [
       "gpt-4o",
       "gpt-4-vision",
       "claude-3-opus",
       "gemini-1.5-pro",
       "gemini-2.0",
-      "gemini-2.5-pro",
       "gemini-exp-vision",
       "learnlm-vision",
       "qwen-vl-max",
```