diff --git a/.env.template b/.env.template
index a9eda4d61..5fac3b47b 100644
--- a/.env.template
+++ b/.env.template
@@ -73,7 +73,6 @@ ANTHROPIC_API_VERSION=
 
 ### anthropic claude Api url (optional)
 ANTHROPIC_URL=
 
-
 ### (optional)
 WHITE_WEBDAV_ENDPOINTS=
@@ -83,3 +82,8 @@ AWS_REGION=
 AWS_ACCESS_KEY=AKIA
 AWS_SECRET_KEY=
 
+### siliconflow Api key (optional)
+SILICONFLOW_API_KEY=
+
+### siliconflow Api url (optional)
+SILICONFLOW_URL=
diff --git a/LICENSE b/LICENSE
index 047f9431e..4864ab00d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023-2024 Zhang Yifei
+Copyright (c) 2023-2025 NextChat
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts
index a2f0660d8..c2f128128 100644
--- a/app/client/platforms/bytedance.ts
+++ b/app/client/platforms/bytedance.ts
@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts
index 2bf3b2338..c436ae61d 100644
--- a/app/client/platforms/deepseek.ts
+++ b/app/client/platforms/deepseek.ts
@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {
 
     // console.log(chatPayload);
 
+    const isR1 =
+      options.config.model.endsWith("-reasoner") ||
+      options.config.model.endsWith("-r1");
+
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index 5ca8e1071..1e593dd42 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
         .join("\n\n");
     };
 
+    let content = "";
+    if (Array.isArray(res)) {
+      res.map((item) => {
+        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+      });
+    }
+
     return (
       getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
-      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
       res?.error?.message ||
       ""
     );
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
       headers: getHeaders(),
     };
 
+    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 467bb82e0..fbe533cad 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3
+          ? REQUEST_TIMEOUT_MS_FOR_THINKING
+          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );
 
       const res = await fetch(chatPath, chatPayload);
diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts
index fe2f9862b..1ad316a61 100644
--- a/app/client/platforms/siliconflow.ts
+++ b/app/client/platforms/siliconflow.ts
@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -120,10 +120,10 @@ export class SiliconflowApi implements LLMApi {
 
     // console.log(chatPayload);
 
-    // make a fetch request
+    // Use extended timeout for thinking models as they typically require more processing time
    const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      REQUEST_TIMEOUT_MS_FOR_THINKING,
     );
 
     if (shouldStream) {
@@ -174,8 +174,8 @@ export class SiliconflowApi implements LLMApi {
 
         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -183,12 +183,12 @@ export class SiliconflowApi implements LLMApi {
           };
         }
 
-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts
index 06dbaaa29..8c41c2d98 100644
--- a/app/client/platforms/xai.ts
+++ b/app/client/platforms/xai.ts
@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 
@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index 54d1c1c99..ecb1c6581 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -6,8 +6,21 @@ import EmojiPicker, {
 
 import { ModelType } from "../store";
 
-import BotIcon from "../icons/bot.svg";
-import BlackBotIcon from "../icons/black-bot.svg";
+import BotIconDefault from "../icons/llm-icons/default.svg";
+import BotIconOpenAI from "../icons/llm-icons/openai.svg";
+import BotIconGemini from "../icons/llm-icons/gemini.svg";
+import BotIconGemma from "../icons/llm-icons/gemma.svg";
+import BotIconClaude from "../icons/llm-icons/claude.svg";
+import BotIconMeta from "../icons/llm-icons/meta.svg";
+import BotIconMistral from "../icons/llm-icons/mistral.svg";
+import BotIconDeepseek from "../icons/llm-icons/deepseek.svg";
+import BotIconMoonshot from "../icons/llm-icons/moonshot.svg";
+import BotIconQwen from "../icons/llm-icons/qwen.svg";
+import BotIconWenxin from "../icons/llm-icons/wenxin.svg";
+import BotIconGrok from "../icons/llm-icons/grok.svg";
+import BotIconHunyuan from "../icons/llm-icons/hunyuan.svg";
+import BotIconDoubao from "../icons/llm-icons/doubao.svg";
+import BotIconChatglm from "../icons/llm-icons/chatglm.svg";
 
 export function getEmojiUrl(unified: string, style: EmojiStyle) {
   // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
@@ -33,17 +46,55 @@ export function AvatarPicker(props: {
 }
 
 export function Avatar(props: { model?: ModelType; avatar?: string }) {
+  let LlmIcon = BotIconDefault;
+
   if (props.model) {
+    const modelName = props.model.toLowerCase();
+
+    if (
+      modelName.startsWith("gpt") ||
+      modelName.startsWith("chatgpt") ||
+      modelName.startsWith("dall-e") ||
+      modelName.startsWith("dalle") ||
+      modelName.startsWith("o1") ||
+      modelName.startsWith("o3")
+    ) {
+      LlmIcon = BotIconOpenAI;
+    } else if (modelName.startsWith("gemini")) {
+      LlmIcon = BotIconGemini;
+    } else if (modelName.startsWith("gemma")) {
+      LlmIcon = BotIconGemma;
+    } else if (modelName.startsWith("claude")) {
+      LlmIcon = BotIconClaude;
+    } else if (modelName.startsWith("llama")) {
+      LlmIcon = BotIconMeta;
+    } else if (modelName.startsWith("mixtral")) {
+      LlmIcon = BotIconMistral;
+    } else if (modelName.startsWith("deepseek")) {
+      LlmIcon = BotIconDeepseek;
+    } else if (modelName.startsWith("moonshot")) {
+      LlmIcon = BotIconMoonshot;
+    } else if (modelName.startsWith("qwen")) {
+      LlmIcon = BotIconQwen;
+    } else if (modelName.startsWith("ernie")) {
+      LlmIcon = BotIconWenxin;
+    } else if (modelName.startsWith("grok")) {
+      LlmIcon = BotIconGrok;
+    } else if (modelName.startsWith("hunyuan")) {
+      LlmIcon = BotIconHunyuan;
+    } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
+      LlmIcon = BotIconDoubao;
+    } else if (
+      modelName.startsWith("glm") ||
+      modelName.startsWith("cogview-") ||
+      modelName.startsWith("cogvideox-")
+    ) {
+      LlmIcon = BotIconChatglm;
+    }
+
     return (