Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-12 04:53:44 +08:00)
New provider SiliconFlow and Its Latest DeepSeek Models

Update README.md
Update constant.ts
Update README_CN.md
@@ -34,6 +34,8 @@ export const XAI_BASE_URL = "https://api.x.ai";
 
 export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
 
+export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";
+
 export const CACHE_URL_PREFIX = "/api/cache";
 export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;
@@ -69,6 +71,7 @@ export enum ApiPath {
   XAI = "/api/xai",
   ChatGLM = "/api/chatglm",
   DeepSeek = "/api/deepseek",
+  SiliconFlow = "/api/siliconflow",
 }
 
 export enum SlotID {
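The new ApiPath.SiliconFlow entry is the in-app proxy route for the provider. As a rough sketch (not code from this commit; the helper name and fallback behavior are assumptions), a client-side caller could resolve the chat URL from either a user-configured endpoint or that proxy path:

// Hypothetical helper, not part of this commit: pick the URL a SiliconFlow
// chat request would target, preferring a custom endpoint and falling back
// to the proxy route added above.
const SILICONFLOW_PROXY_PATH = "/api/siliconflow"; // mirrors ApiPath.SiliconFlow

function resolveSiliconFlowUrl(customEndpoint?: string): string {
  const base = customEndpoint?.trim() || SILICONFLOW_PROXY_PATH;
  // Drop any trailing slash so the join with the chat path stays clean.
  return `${base.replace(/\/+$/, "")}/v1/chat/completions`;
}

// resolveSiliconFlowUrl()                             -> "/api/siliconflow/v1/chat/completions"
// resolveSiliconFlowUrl("https://api.siliconflow.cn") -> "https://api.siliconflow.cn/v1/chat/completions"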
@@ -125,6 +128,7 @@ export enum ServiceProvider {
   XAI = "XAI",
   ChatGLM = "ChatGLM",
   DeepSeek = "DeepSeek",
+  SiliconFlow = "SiliconFlow",
 }
 
 // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -150,6 +154,7 @@ export enum ModelProvider {
   XAI = "XAI",
   ChatGLM = "ChatGLM",
   DeepSeek = "DeepSeek",
+  SiliconFlow = "SiliconFlow",
 }
 
 export const Stability = {
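ApiPath, ServiceProvider, and ModelProvider each gain the same member, so the provider appears everywhere the app switches on these enums. A minimal, self-contained sketch of that pattern (the function and the default model names below are illustrative assumptions, not the app's actual wiring):

// Illustrative only: an exhaustive switch over ModelProvider. Adding
// SiliconFlow to the enum makes TypeScript flag any switch like this
// that has not yet handled the new member.
enum ModelProvider {
  XAI = "XAI",
  ChatGLM = "ChatGLM",
  DeepSeek = "DeepSeek",
  SiliconFlow = "SiliconFlow",
}

function defaultModelFor(provider: ModelProvider): string {
  switch (provider) {
    case ModelProvider.XAI:
      return "grok-beta";
    case ModelProvider.ChatGLM:
      return "glm-4-plus";
    case ModelProvider.DeepSeek:
      return "deepseek-chat";
    case ModelProvider.SiliconFlow:
      return "deepseek-ai/DeepSeek-V3";
    default: {
      // Exhaustiveness guard: unreachable once every member is handled.
      const unhandled: never = provider;
      throw new Error(`Unhandled provider: ${unhandled}`);
    }
  }
}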
@@ -249,6 +254,11 @@ export const ChatGLM = {
   VideoPath: "api/paas/v4/videos/generations",
 };
 
+export const SiliconFlow = {
+  ExampleEndpoint: SILICONFLOW_BASE_URL,
+  ChatPath: "v1/chat/completions",
+};
+
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
 // export const DEFAULT_SYSTEM_TEMPLATE = `
 // You are ChatGPT, a large language model trained by {{ServiceProvider}}.
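ChatPath points at the OpenAI-style chat completions route, with ExampleEndpoint as the default upstream. A minimal sketch of a request built from these two constants, assuming an OpenAI-compatible payload; the fetch call, headers, and model choice are illustrative, not code from this commit:

// Hypothetical request using the constants added above.
const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";
const SiliconFlow = {
  ExampleEndpoint: SILICONFLOW_BASE_URL,
  ChatPath: "v1/chat/completions",
};

async function sketchChatRequest(apiKey: string) {
  // Join the example endpoint and the chat path into the full URL.
  const url = `${SiliconFlow.ExampleEndpoint}/${SiliconFlow.ChatPath}`;
  const res = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: "deepseek-ai/DeepSeek-V3", // one of the models listed further down
      messages: [{ role: "user", content: "Hello" }],
      stream: false,
    }),
  });
  return res.json();
}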
@@ -413,7 +423,7 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "o1-preview-2024-09-12": "2023-10",
   "o1-preview": "2023-10",
   "o1-2024-12-17": "2023-10",
-  "o1": "2023-10",
+  o1: "2023-10",
   "o3-mini-2025-01-31": "2023-10",
   "o3-mini": "2023-10",
   // After improvements,
@@ -597,6 +607,21 @@ const chatglmModels = [
   // "cogvideox-flash", // free
 ];
 
+const siliconflowModels = [
+  "Qwen/Qwen2.5-7B-Instruct",
+  "Qwen/Qwen2.5-72B-Instruct",
+  "deepseek-ai/DeepSeek-R1",
+  "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+  "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+  "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+  "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+  "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+  "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
+  "deepseek-ai/DeepSeek-V3",
+  "meta-llama/Llama-3.3-70B-Instruct",
+  "THUDM/glm-4-9b-chat",
+];
+
 let seq = 1000; // the built-in model sequence number generator starts at 1000
 export const DEFAULT_MODELS = [
   ...openaiModels.map((name) => ({
@@ -742,6 +767,17 @@ export const DEFAULT_MODELS = [
       sorted: 13,
     },
   })),
+  ...siliconflowModels.map((name) => ({
+    name,
+    available: true,
+    sorted: seq++,
+    provider: {
+      id: "siliconflow",
+      providerName: "SiliconFlow",
+      providerType: "siliconflow",
+      sorted: 14,
+    },
+  })),
 ] as const;
 
 export const CHAT_PAGE_SIZE = 15;
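Each entry in siliconflowModels is expanded into a model descriptor with a seq-based sort order. A sketch of the resulting shape for one model; the concrete sorted number is an assumption, since seq increments across all built-in providers:

// Approximate shape of one generated DEFAULT_MODELS entry; the numeric
// `sorted` value shown here is illustrative, not the real sequence number.
const exampleEntry = {
  name: "deepseek-ai/DeepSeek-V3",
  available: true,
  sorted: 1000, // actually seq++ at module load, starting from 1000
  provider: {
    id: "siliconflow",
    providerName: "SiliconFlow",
    providerType: "siliconflow",
    sorted: 14,
  },
} as const;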