Merge branch 'main' into main

This commit is contained in:
glay
2025-02-06 12:25:47 +08:00
committed by GitHub
17 changed files with 568 additions and 21 deletions


@@ -34,6 +34,8 @@ export const XAI_BASE_URL = "https://api.x.ai";
export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";
export const CACHE_URL_PREFIX = "/api/cache";
export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;
@@ -70,6 +72,7 @@ export enum ApiPath {
XAI = "/api/xai",
ChatGLM = "/api/chatglm",
DeepSeek = "/api/deepseek",
SiliconFlow = "/api/siliconflow",
}
export enum SlotID {
@@ -127,6 +130,7 @@ export enum ServiceProvider {
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
Bedrock = "Bedrock",
SiliconFlow = "SiliconFlow",
}
// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -152,7 +156,8 @@ export enum ModelProvider {
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
Bedrock = "Bedrock",
Bedrock = "Bedrock",
SiliconFlow = "SiliconFlow",
}
export const Stability = {
@@ -261,6 +266,11 @@ export const Bedrock = {
// Get the region from access store for BEDROCK_BASE_URL
export const BEDROCK_BASE_URL = Bedrock.getEndpoint();
export const SiliconFlow = {
ExampleEndpoint: SILICONFLOW_BASE_URL,
ChatPath: "v1/chat/completions",
};
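A minimal sketch of how the new SiliconFlow constants could compose a chat request URL; the helper name and the trailing-slash handling are assumptions for illustration, not part of this commit:

// Hypothetical helper: join a (possibly user-configured) base URL with ChatPath.
function siliconflowChatUrl(baseUrl?: string): string {
  const base = (baseUrl || SILICONFLOW_BASE_URL).replace(/\/+$/, "");
  return `${base}/${SiliconFlow.ChatPath}`;
}
// siliconflowChatUrl() -> "https://api.siliconflow.cn/v1/chat/completions"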
export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
// export const DEFAULT_SYSTEM_TEMPLATE = `
// You are ChatGPT, a large language model trained by {{ServiceProvider}}.
@@ -420,8 +430,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4o-mini": "2023-10",
"gpt-4o-mini-2024-07-18": "2023-10",
"gpt-4-vision-preview": "2023-04",
"o1-mini-2024-09-12": "2023-10",
"o1-mini": "2023-10",
"o1-preview-2024-09-12": "2023-10",
"o1-preview": "2023-10",
"o1-2024-12-17": "2023-10",
o1: "2023-10",
"o3-mini-2025-01-31": "2023-10",
"o3-mini": "2023-10",
// After improvements,
// it's now easier to add entries to "KnowledgeCutOffDate" instead of hardcoding them, as was done previously.
"gemini-pro": "2023-12",
@@ -464,6 +480,8 @@ export const VISION_MODEL_REGEXES = [
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
const openaiModels = [
// As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
// as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
@@ -486,6 +504,7 @@ const openaiModels = [
"dall-e-3",
"o1-mini",
"o1-preview",
"o3-mini",
];
const bedrockModels = [
@@ -527,7 +546,9 @@ const googleModels = [
"gemini-exp-1121",
"gemini-exp-1206",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-flash-thinking-exp-01-21",
];
const anthropicModels = [
@@ -622,6 +643,21 @@ const chatglmModels = [
// "cogvideox-flash", // free
];
const siliconflowModels = [
"Qwen/Qwen2.5-7B-Instruct",
"Qwen/Qwen2.5-72B-Instruct",
"deepseek-ai/DeepSeek-R1",
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
"deepseek-ai/DeepSeek-V3",
"meta-llama/Llama-3.3-70B-Instruct",
"THUDM/glm-4-9b-chat",
];
let seq = 1000; // built-in model sort-order generator starts at 1000
export const DEFAULT_MODELS = [
...openaiModels.map((name) => ({
@@ -768,6 +804,17 @@ export const DEFAULT_MODELS = [
sorted: 13,
},
})),
...siliconflowModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "siliconflow",
providerName: "SiliconFlow",
providerType: "siliconflow",
sorted: 14,
},
})),
...bedrockModels.map((name) => ({
name,
available: true,
@@ -776,8 +823,8 @@ export const DEFAULT_MODELS = [
id: "bedrock",
providerName: "Bedrock",
providerType: "bedrock",
sorted: 14,
},
sorted: 15,
},
})),
] as const;
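For reference, a minimal sketch of how the newly registered SiliconFlow entries could be read back out of DEFAULT_MODELS; the filter below is illustrative only and not code from this commit:

// Hypothetical usage: pick out the SiliconFlow models by their providerType field.
const siliconflowDefaults = DEFAULT_MODELS.filter(
  (m) => m.provider.providerType === "siliconflow",
);
console.log(siliconflowDefaults.map((m) => m.name));
// -> ["Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-72B-Instruct", ...]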