export const OWNER = "ChatGPTNextWeb";
export const REPO = "ChatGPT-Next-Web";
export const REPO_URL = `https://github.com/${OWNER}/${REPO}`;
export const PLUGINS_REPO_URL = `https://github.com/${OWNER}/NextChat-Awesome-Plugins`;
export const ISSUE_URL = `https://github.com/${OWNER}/${REPO}/issues`;
export const UPDATE_URL = `${REPO_URL}#keep-updated`;
export const RELEASE_URL = `${REPO_URL}/releases`;
export const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/commits?per_page=1`;
export const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tags?per_page=1`;
export const RUNTIME_CONFIG_DOM = "danger-runtime-config";

export const STABILITY_BASE_URL = "https://api.stability.ai";

export const OPENAI_BASE_URL = "https://api.openai.com";
export const ANTHROPIC_BASE_URL = "https://api.anthropic.com";

export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";

export const BAIDU_BASE_URL = "https://aip.baidubce.com";
export const BAIDU_OATUH_URL = `${BAIDU_BASE_URL}/oauth/2.0/token`;

export const BYTEDANCE_BASE_URL = "https://ark.cn-beijing.volces.com";

export const ALIBABA_BASE_URL = "https://dashscope.aliyuncs.com/api/";

export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";

export const MOONSHOT_BASE_URL = "https://api.moonshot.cn";
export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";

export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";

export const XAI_BASE_URL = "https://api.x.ai";

export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";

export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";

export const CACHE_URL_PREFIX = "/api/cache";
export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;

export enum Path {
  Home = "/",
  Chat = "/chat",
  Settings = "/settings",
  NewChat = "/new-chat",
  Masks = "/masks",
  Plugins = "/plugins",
  Auth = "/auth",
  Sd = "/sd",
  SdNew = "/sd-new",
  Artifacts = "/artifacts",
  SearchChat = "/search-chat",
  McpMarket = "/mcp-market",
}

export enum ApiPath {
  Cors = "",
  Azure = "/api/azure",
  OpenAI = "/api/openai",
  Anthropic = "/api/anthropic",
  Google = "/api/google",
  Baidu = "/api/baidu",
  ByteDance = "/api/bytedance",
  Alibaba = "/api/alibaba",
  Tencent = "/api/tencent",
  Moonshot = "/api/moonshot",
  Iflytek = "/api/iflytek",
  Stability = "/api/stability",
  Artifacts = "/api/artifacts",
  XAI = "/api/xai",
  ChatGLM = "/api/chatglm",
  DeepSeek = "/api/deepseek",
  SiliconFlow = "/api/siliconflow",
}

export enum SlotID {
  AppBody = "app-body",
  CustomModel = "custom-model",
}

export enum FileName {
  Masks = "masks.json",
  Prompts = "prompts.json",
}

export enum StoreKey {
  Chat = "chat-next-web-store",
  Plugin = "chat-next-web-plugin",
  Access = "access-control",
  Config = "app-config",
  Mask = "mask-store",
  Prompt = "prompt-store",
  Update = "chat-update",
  Sync = "sync",
  SdList = "sd-list",
  Mcp = "mcp-store",
}

export const DEFAULT_SIDEBAR_WIDTH = 300;
export const MAX_SIDEBAR_WIDTH = 500;
export const MIN_SIDEBAR_WIDTH = 230;
export const NARROW_SIDEBAR_WIDTH = 100;

export const ACCESS_CODE_PREFIX = "nk-";

export const LAST_INPUT_KEY = "last-input";
export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;

export const STORAGE_KEY = "chatgpt-next-web";

export const REQUEST_TIMEOUT_MS = 60000;
export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";

export enum ServiceProvider {
  OpenAI = "OpenAI",
  Azure = "Azure",
  Google = "Google",
  Anthropic = "Anthropic",
  Baidu = "Baidu",
  ByteDance = "ByteDance",
  Alibaba = "Alibaba",
  Tencent = "Tencent",
  Moonshot = "Moonshot",
  Stability = "Stability",
  Iflytek = "Iflytek",
  XAI = "XAI",
  ChatGLM = "ChatGLM",
  DeepSeek = "DeepSeek",
  SiliconFlow = "SiliconFlow",
}

// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
// BLOCK_NONE will not block any content, and BLOCK_ONLY_HIGH will block only high-risk content.
export enum GoogleSafetySettingsThreshold {
  BLOCK_NONE = "BLOCK_NONE",
  BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH",
  BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE",
  BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE",
}
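
// Illustrative sketch (not part of the original file): the Gemini API takes these
// thresholds in a per-category "safetySettings" list on the request body; the
// category name below is an example taken from the safety-settings docs linked above.
//   safetySettings: [
//     {
//       category: "HARM_CATEGORY_DANGEROUS_CONTENT",
//       threshold: GoogleSafetySettingsThreshold.BLOCK_ONLY_HIGH,
//     },
//   ],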

export enum ModelProvider {
  Stability = "Stability",
  GPT = "GPT",
  GeminiPro = "GeminiPro",
  Claude = "Claude",
  Ernie = "Ernie",
  Doubao = "Doubao",
  Qwen = "Qwen",
  Hunyuan = "Hunyuan",
  Moonshot = "Moonshot",
  Iflytek = "Iflytek",
  XAI = "XAI",
  ChatGLM = "ChatGLM",
  DeepSeek = "DeepSeek",
  SiliconFlow = "SiliconFlow",
}

export const Stability = {
  GeneratePath: "v2beta/stable-image/generate",
  ExampleEndpoint: "https://api.stability.ai",
};

export const Anthropic = {
  ChatPath: "v1/messages",
  ChatPath1: "v1/complete",
  ExampleEndpoint: "https://api.anthropic.com",
  Vision: "2023-06-01",
};

export const OpenaiPath = {
  ChatPath: "v1/chat/completions",
  SpeechPath: "v1/audio/speech",
  ImagePath: "v1/images/generations",
  UsagePath: "dashboard/billing/usage",
  SubsPath: "dashboard/billing/subscription",
  ListModelPath: "v1/models",
};

export const Azure = {
  ChatPath: (deployName: string, apiVersion: string) =>
    `deployments/${deployName}/chat/completions?api-version=${apiVersion}`,
  // https://<your_resource_name>.openai.azure.com/openai/deployments/<your_deployment_name>/images/generations?api-version=<api_version>
  ImagePath: (deployName: string, apiVersion: string) =>
    `deployments/${deployName}/images/generations?api-version=${apiVersion}`,
  ExampleEndpoint: "https://{resource-url}/openai",
};
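
// Illustrative usage (not in the original file); the deployment name and API version
// below are placeholder values:
//   Azure.ChatPath("my-deployment", "2024-02-15-preview")
//   // => "deployments/my-deployment/chat/completions?api-version=2024-02-15-preview"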

export const Google = {
  ExampleEndpoint: "https://generativelanguage.googleapis.com/",
  ChatPath: (modelName: string) =>
    `v1beta/models/${modelName}:streamGenerateContent`,
};
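
// Illustrative usage (not in the original file):
//   Google.ChatPath("gemini-1.5-pro")
//   // => "v1beta/models/gemini-1.5-pro:streamGenerateContent"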

export const Baidu = {
  ExampleEndpoint: BAIDU_BASE_URL,
  ChatPath: (modelName: string) => {
    let endpoint = modelName;
    if (modelName === "ernie-4.0-8k") {
      endpoint = "completions_pro";
    }
    if (modelName === "ernie-4.0-8k-preview-0518") {
      endpoint = "completions_adv_pro";
    }
    if (modelName === "ernie-3.5-8k") {
      endpoint = "completions";
    }
    if (modelName === "ernie-speed-8k") {
      endpoint = "ernie_speed";
    }
    return `rpc/2.0/ai_custom/v1/wenxinworkshop/chat/${endpoint}`;
  },
};
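
// Illustrative usage (not in the original file): models with a dedicated endpoint alias
// are remapped, everything else is passed through unchanged:
//   Baidu.ChatPath("ernie-4.0-8k")
//   // => "rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
//   Baidu.ChatPath("ernie-lite-8k")
//   // => "rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k"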

export const ByteDance = {
  ExampleEndpoint: "https://ark.cn-beijing.volces.com/api/",
  ChatPath: "api/v3/chat/completions",
};

export const Alibaba = {
  ExampleEndpoint: ALIBABA_BASE_URL,
  ChatPath: (modelName: string) => {
    if (modelName.includes("vl") || modelName.includes("omni")) {
      return "v1/services/aigc/multimodal-generation/generation";
    }
    return "v1/services/aigc/text-generation/generation";
  },
};
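
// Illustrative usage (not in the original file): "vl" (vision) and "omni" models are
// routed to the multimodal endpoint, all other models to the text endpoint:
//   Alibaba.ChatPath("qwen-vl-max")
//   // => "v1/services/aigc/multimodal-generation/generation"
//   Alibaba.ChatPath("qwen-turbo")
//   // => "v1/services/aigc/text-generation/generation"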

export const Tencent = {
  ExampleEndpoint: TENCENT_BASE_URL,
};

export const Moonshot = {
  ExampleEndpoint: MOONSHOT_BASE_URL,
  ChatPath: "v1/chat/completions",
};

export const Iflytek = {
  ExampleEndpoint: IFLYTEK_BASE_URL,
  ChatPath: "v1/chat/completions",
};

export const DeepSeek = {
  ExampleEndpoint: DEEPSEEK_BASE_URL,
  ChatPath: "chat/completions",
};

export const XAI = {
  ExampleEndpoint: XAI_BASE_URL,
  ChatPath: "v1/chat/completions",
};

export const ChatGLM = {
  ExampleEndpoint: CHATGLM_BASE_URL,
  ChatPath: "api/paas/v4/chat/completions",
  ImagePath: "api/paas/v4/images/generations",
  VideoPath: "api/paas/v4/videos/generations",
};

export const SiliconFlow = {
  ExampleEndpoint: SILICONFLOW_BASE_URL,
  ChatPath: "v1/chat/completions",
  ListModelPath: "v1/models?sub_type=chat",
};

export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
// export const DEFAULT_SYSTEM_TEMPLATE = `
// You are ChatGPT, a large language model trained by {{ServiceProvider}}.
// Knowledge cutoff: {{cutoff}}
// Current model: {{model}}
// Current time: {{time}}
// Latex inline: $x^2$
// Latex block: $$e=mc^2$$
// `;
export const DEFAULT_SYSTEM_TEMPLATE = `
You are ChatGPT, a large language model trained by {{ServiceProvider}}.
Knowledge cutoff: {{cutoff}}
Current model: {{model}}
Current time: {{time}}
Latex inline: \\(x^2\\)
Latex block: $$e=mc^2$$
`;

export const MCP_TOOLS_TEMPLATE = `
[clientId]
{{ clientId }}
[tools]
{{ tools }}
`;

export const MCP_SYSTEM_TEMPLATE = `
You are an AI assistant with access to system tools. Your role is to help users by combining natural language understanding with tool operations when needed.

1. AVAILABLE TOOLS:
{{ MCP_TOOLS }}

2. WHEN TO USE TOOLS:
   - ALWAYS USE TOOLS when they can help answer user questions
   - DO NOT just describe what you could do - TAKE ACTION immediately
   - If you're not sure whether to use a tool, USE IT
   - Common triggers for tool use:
     * Questions about files or directories
     * Requests to check, list, or manipulate system resources
     * Any query that can be answered with available tools

3. HOW TO USE TOOLS:
   A. Tool Call Format:
      - Use markdown code blocks with format: \`\`\`json:mcp:{clientId}\`\`\`
      - Always include:
        * method: "tools/call" (only this method is supported)
        * params:
          - name: must match an available primitive name
          - arguments: required parameters for the primitive

   B. Response Format:
      - Tool responses will come as user messages
      - Format: \`\`\`json:mcp-response:{clientId}\`\`\`
      - Wait for response before making another tool call

   C. Important Rules:
      - Only use tools/call method
      - Only ONE tool call per message
      - ALWAYS TAKE ACTION instead of just describing what you could do
      - Include the correct clientId in code block language tag
      - Verify arguments match the primitive's requirements

4. INTERACTION FLOW:
   A. When user makes a request:
      - IMMEDIATELY use appropriate tool if available
      - DO NOT ask if user wants you to use the tool
      - DO NOT just describe what you could do
   B. After receiving tool response:
      - Explain results clearly
      - Take next appropriate action if needed
   C. If tools fail:
      - Explain the error
      - Try alternative approach immediately

5. EXAMPLE INTERACTION:

  good example:

   \`\`\`json:mcp:filesystem
   {
     "method": "tools/call",
     "params": {
       "name": "list_allowed_directories",
       "arguments": {}
     }
   }
   \`\`\`


  \`\`\`json:mcp-response:filesystem
  {
  "method": "tools/call",
  "params": {
    "name": "write_file",
    "arguments": {
      "path": "/Users/river/dev/nextchat/test/joke.txt",
      "content": "Why is the math book always sad? Because it has too many problems."
    }
  }
  }
\`\`\`

   the following is a wrong mcp json example:

   \`\`\`json:mcp:filesystem
   {
      "method": "write_file",
      "params": {
        "path": "NextChat_Information.txt",
        "content": "1"
    }
   }
   \`\`\`

   This is wrong because the method is not tools/call.

   \`\`\`{
  "method": "search_repositories",
  "params": {
    "query": "2oeee"
  }
}
   \`\`\`

   This is wrong because the method is not tools/call!

   the right format is:
   \`\`\`json:mcp:filesystem
   {
     "method": "tools/call",
     "params": {
       "name": "search_repositories",
       "arguments": {
         "query": "2oeee"
       }
     }
   }
   \`\`\`

   please follow the format strictly and ONLY use the tools/call method!

`;

export const SUMMARIZE_MODEL = "gpt-4o-mini";
export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";

export const KnowledgeCutOffDate: Record<string, string> = {
  default: "2021-09",
  "gpt-4-turbo": "2023-12",
  "gpt-4-turbo-2024-04-09": "2023-12",
  "gpt-4-turbo-preview": "2023-12",
  "gpt-4o": "2023-10",
  "gpt-4o-2024-05-13": "2023-10",
  "gpt-4o-2024-08-06": "2023-10",
  "gpt-4o-2024-11-20": "2023-10",
  "chatgpt-4o-latest": "2023-10",
  "gpt-4o-mini": "2023-10",
  "gpt-4o-mini-2024-07-18": "2023-10",
  "gpt-4-vision-preview": "2023-04",
  "o1-mini-2024-09-12": "2023-10",
  "o1-mini": "2023-10",
  "o1-preview-2024-09-12": "2023-10",
  "o1-preview": "2023-10",
  "o1-2024-12-17": "2023-10",
  o1: "2023-10",
  "o3-mini-2025-01-31": "2023-10",
  "o3-mini": "2023-10",
  // After these improvements, a "KnowledgeCutOffDate" entry can simply be added here
  // instead of being hardcoded elsewhere, as was done previously.
  "gemini-pro": "2023-12",
  "gemini-pro-vision": "2023-12",
  "deepseek-chat": "2024-07",
  "deepseek-coder": "2024-07",
};
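
// Illustrative lookup (not in the original file): a caller would typically fall back to
// the "default" entry when a model has no explicit cutoff, e.g.
//   const cutoff = KnowledgeCutOffDate["gpt-4o"] ?? KnowledgeCutOffDate.default;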

export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
export const DEFAULT_TTS_ENGINES = ["OpenAI-TTS", "Edge-TTS"];
export const DEFAULT_TTS_MODEL = "tts-1";
export const DEFAULT_TTS_VOICE = "alloy";
export const DEFAULT_TTS_MODELS = ["tts-1", "tts-1-hd"];
export const DEFAULT_TTS_VOICES = [
  "alloy",
  "echo",
  "fable",
  "onyx",
  "nova",
  "shimmer",
];

export const VISION_MODEL_REGEXES = [
  /vision/,
  /gpt-4o/,
  /claude-3/,
  /gemini-1\.5/,
  /gemini-exp/,
  /gemini-2\.0/,
  /learnlm/,
  /qwen-vl/,
  /qwen2-vl/,
  /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
  /^dall-e-3$/, // Matches exactly "dall-e-3"
  /glm-4v/,
  /vl/i,
];

export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
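
// Minimal sketch (not part of this file) of how these two lists combine: a model name
// counts as vision-capable when it matches at least one VISION_MODEL_REGEXES entry and
// none of EXCLUDE_VISION_MODEL_REGEXES. The helper name is illustrative only.
//
//   function isVisionModelName(model: string): boolean {
//     return (
//       VISION_MODEL_REGEXES.some((regex) => regex.test(model)) &&
//       !EXCLUDE_VISION_MODEL_REGEXES.some((regex) => regex.test(model))
//     );
//   }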

const openaiModels = [
  // As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
  // as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
  "gpt-3.5-turbo",
  "gpt-3.5-turbo-1106",
  "gpt-3.5-turbo-0125",
  "gpt-4",
  "gpt-4-0613",
  "gpt-4-32k",
  "gpt-4-32k-0613",
  "gpt-4-turbo",
  "gpt-4-turbo-preview",
  "gpt-4o",
  "gpt-4o-2024-05-13",
  "gpt-4o-2024-08-06",
  "gpt-4o-2024-11-20",
  "chatgpt-4o-latest",
  "gpt-4o-mini",
  "gpt-4o-mini-2024-07-18",
  "gpt-4-vision-preview",
  "gpt-4-turbo-2024-04-09",
  "gpt-4-1106-preview",
  "dall-e-3",
  "o1-mini",
  "o1-preview",
  "o3-mini",
];

const googleModels = [
  "gemini-1.0-pro", // Deprecated on 2/15/2025
  "gemini-1.5-pro-latest",
  "gemini-1.5-pro",
  "gemini-1.5-pro-002",
  "gemini-1.5-pro-exp-0827",
  "gemini-1.5-flash-latest",
  "gemini-1.5-flash-8b-latest",
  "gemini-1.5-flash",
  "gemini-1.5-flash-8b",
  "gemini-1.5-flash-002",
  "gemini-1.5-flash-exp-0827",
  "learnlm-1.5-pro-experimental",
  "gemini-exp-1114",
  "gemini-exp-1121",
  "gemini-exp-1206",
  "gemini-2.0-flash",
  "gemini-2.0-flash-exp",
  "gemini-2.0-flash-lite-preview-02-05",
  "gemini-2.0-flash-thinking-exp",
  "gemini-2.0-flash-thinking-exp-1219",
  "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.0-pro-exp",
  "gemini-2.0-pro-exp-02-05",
];

const anthropicModels = [
  "claude-instant-1.2",
  "claude-2.0",
  "claude-2.1",
  "claude-3-sonnet-20240229",
  "claude-3-opus-20240229",
  "claude-3-opus-latest",
  "claude-3-haiku-20240307",
  "claude-3-5-haiku-20241022",
  "claude-3-5-haiku-latest",
  "claude-3-5-sonnet-20240620",
  "claude-3-5-sonnet-20241022",
  "claude-3-5-sonnet-latest",
  "claude-3-7-sonnet-20250219",
  "claude-3-7-sonnet-latest",
];

const baiduModels = [
  "ernie-4.0-turbo-8k",
  "ernie-4.0-8k",
  "ernie-4.0-8k-preview",
  "ernie-4.0-8k-preview-0518",
  "ernie-4.0-8k-latest",
  "ernie-3.5-8k",
  "ernie-3.5-8k-0205",
  "ernie-speed-128k",
  "ernie-speed-8k",
  "ernie-lite-8k",
  "ernie-tiny-8k",
];

const bytedanceModels = [
  "Doubao-lite-4k",
  "Doubao-lite-32k",
  "Doubao-lite-128k",
  "Doubao-pro-4k",
  "Doubao-pro-32k",
  "Doubao-pro-128k",
];

const alibabaModels = [
  "qwen-turbo",
  "qwen-plus",
  "qwen-max",
  "qwen-max-0428",
  "qwen-max-0403",
  "qwen-max-0107",
  "qwen-max-longcontext",
  "qwen-omni-turbo",
  "qwen-vl-plus",
  "qwen-vl-max",
];

const tencentModels = [
  "hunyuan-pro",
  "hunyuan-standard",
  "hunyuan-lite",
  "hunyuan-role",
  "hunyuan-functioncall",
  "hunyuan-code",
  "hunyuan-vision",
];

const moonshotModels = ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"];

const iflytekModels = [
  "general",
  "generalv3",
  "pro-128k",
  "generalv3.5",
  "4.0Ultra",
];

const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"];

const xAIModels = [
  "grok-beta",
  "grok-2",
  "grok-2-1212",
  "grok-2-latest",
  "grok-vision-beta",
  "grok-2-vision-1212",
  "grok-2-vision",
  "grok-2-vision-latest",
];

const chatglmModels = [
  "glm-4-plus",
  "glm-4-0520",
  "glm-4",
  "glm-4-air",
  "glm-4-airx",
  "glm-4-long",
  "glm-4-flashx",
  "glm-4-flash",
  "glm-4v-plus",
  "glm-4v",
  "glm-4v-flash", // free
  "cogview-3-plus",
  "cogview-3",
  "cogview-3-flash", // free
  // Currently cannot be adapted to polling tasks
  //   "cogvideox",
  //   "cogvideox-flash", // free
];

const siliconflowModels = [
  "Qwen/Qwen2.5-7B-Instruct",
  "Qwen/Qwen2.5-72B-Instruct",
  "deepseek-ai/DeepSeek-R1",
  "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
  "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
  "deepseek-ai/DeepSeek-V3",
  "meta-llama/Llama-3.3-70B-Instruct",
  "THUDM/glm-4-9b-chat",
  "Pro/deepseek-ai/DeepSeek-R1",
  "Pro/deepseek-ai/DeepSeek-V3",
];

let seq = 1000; // The sequence generator for built-in models starts at 1000
export const DEFAULT_MODELS = [
  ...openaiModels.map((name) => ({
    name,
    available: true,
    sorted: seq++, // Global sequence sort index
    provider: {
      id: "openai",
      providerName: "OpenAI",
      providerType: "openai",
      sorted: 1, // Fixed here to keep the order consistent with the previous built-in version
    },
  })),
  ...openaiModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "azure",
      providerName: "Azure",
      providerType: "azure",
      sorted: 2,
    },
  })),
  ...googleModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "google",
      providerName: "Google",
      providerType: "google",
      sorted: 3,
    },
  })),
  ...anthropicModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "anthropic",
      providerName: "Anthropic",
      providerType: "anthropic",
      sorted: 4,
    },
  })),
  ...baiduModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "baidu",
      providerName: "Baidu",
      providerType: "baidu",
      sorted: 5,
    },
  })),
  ...bytedanceModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "bytedance",
      providerName: "ByteDance",
      providerType: "bytedance",
      sorted: 6,
    },
  })),
  ...alibabaModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "alibaba",
      providerName: "Alibaba",
      providerType: "alibaba",
      sorted: 7,
    },
  })),
  ...tencentModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "tencent",
      providerName: "Tencent",
      providerType: "tencent",
      sorted: 8,
    },
  })),
  ...moonshotModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "moonshot",
      providerName: "Moonshot",
      providerType: "moonshot",
      sorted: 9,
    },
  })),
  ...iflytekModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "iflytek",
      providerName: "Iflytek",
      providerType: "iflytek",
      sorted: 10,
    },
  })),
  ...xAIModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "xai",
      providerName: "XAI",
      providerType: "xai",
      sorted: 11,
    },
  })),
  ...chatglmModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "chatglm",
      providerName: "ChatGLM",
      providerType: "chatglm",
      sorted: 12,
    },
  })),
  ...deepseekModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "deepseek",
      providerName: "DeepSeek",
      providerType: "deepseek",
      sorted: 13,
    },
  })),
  ...siliconflowModels.map((name) => ({
    name,
    available: true,
    sorted: seq++,
    provider: {
      id: "siliconflow",
      providerName: "SiliconFlow",
      providerType: "siliconflow",
      sorted: 14,
    },
  })),
] as const;
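
// Illustrative usage (not in the original file): every entry carries its provider and a
// global "sorted" index, so downstream code can, for example, pick one provider's
// defaults without re-sorting:
//   const azureDefaults = DEFAULT_MODELS.filter((m) => m.provider.id === "azure");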

export const CHAT_PAGE_SIZE = 15;
export const MAX_RENDER_MSG_COUNT = 45;

// some well-known WebDAV endpoints
export const internalAllowedWebDavEndpoints = [
  "https://dav.jianguoyun.com/dav/",
  "https://dav.dropdav.com/",
  "https://dav.box.com/dav",
  "https://nanao.teracloud.jp/dav/",
  "https://bora.teracloud.jp/dav/",
  "https://webdav.4shared.com/",
  "https://dav.idrivesync.com",
  "https://webdav.yandex.com",
  "https://app.koofr.net/dav/Koofr",
];

export const DEFAULT_GA_ID = "G-89WN60ZK2E";

export const SAAS_CHAT_URL = "https://nextchat.club";
export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";