Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 08:13:43 +08:00)

Compare commits: 43 Commits (Leizhenpen ... 96f40e24ca)
| SHA1 |
|---|
| 96f40e24ca |
| c261ebc82c |
| f7c747c65f |
| 48469bd8ca |
| 5a5e887f2b |
| b6f5d75656 |
| 0d41a17ef6 |
| f7cde17919 |
| 570cbb34b6 |
| 7aa9ae0a3e |
| 2d4180f5be |
| 9f0182b55e |
| ad6666eeaf |
| a2c4e468a0 |
| 2167076652 |
| e123076250 |
| ebcb4db245 |
| 0a25a1a8cb |
| f3154b20a5 |
| b709ee3983 |
| f5f3ce94f6 |
| 2b5f600308 |
| b966107117 |
| 377480b448 |
| 8bd0d6a1a7 |
| 90827fc593 |
| 008e339b6d |
| 12863f5213 |
| cf140d4228 |
| 476d946f96 |
| 9714258322 |
| 48cd4b11b5 |
| 77c78b230a |
| b44686b887 |
| 34bdd4b945 |
| b0758cccde |
| 98a11e56d2 |
| 86f86962fb |
| 2137aa65bf |
| 18fa2cc30d |
| 0bfc648085 |
| 9f91c2d05c |
| a029b4330b |
@@ -1,13 +1,13 @@
<div align="center">

<a href='https://nextchat.dev/chat'>
<a href='https://nextchat.club'>
  <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
</a>

<h1 align="center">NextChat (ChatGPT Next Web)</h1>
<h1 align="center">NextChat</h1>

English / [简体中文](./README_CN.md)

@@ -22,10 +22,10 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]

[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)

[saas-url]: https://nextchat.dev/chat?utm_source=readme
[saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.dev/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases

@@ -8,7 +8,7 @@

一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。

[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)

[<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA)  [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

@@ -5,7 +5,7 @@

ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。

[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)

[<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA)  [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

@@ -14,8 +14,12 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
  if (config.disableGPT4) {
    remoteModelRes.data = remoteModelRes.data.filter(
      (m) =>
        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1") || m.id.startsWith("o3")) ||
        m.id.startsWith("gpt-4o-mini"),
        !(
          m.id.startsWith("gpt-4") ||
          m.id.startsWith("chatgpt-4o") ||
          m.id.startsWith("o1") ||
          m.id.startsWith("o3")
        ) || m.id.startsWith("gpt-4o-mini"),
    );
  }

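The hunk above only reflows the `disableGPT4` filter across multiple lines; the predicate itself is unchanged: model ids starting with gpt-4, chatgpt-4o, o1, or o3 are hidden unless the id starts with gpt-4o-mini. A minimal standalone sketch of that predicate (the `isVisibleWhenGPT4Disabled` helper name is ours, not from the repo, which applies this inline in getModels()):

```ts
// Sketch only: mirrors the filter predicate shown in the hunk above.
function isVisibleWhenGPT4Disabled(id: string): boolean {
  const blocked =
    id.startsWith("gpt-4") ||
    id.startsWith("chatgpt-4o") ||
    id.startsWith("o1") ||
    id.startsWith("o3");
  // gpt-4o-mini is explicitly allowed even though it matches the gpt-4 prefix.
  return !blocked || id.startsWith("gpt-4o-mini");
}

// Example: ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo", "o3-mini"].filter(isVisibleWhenGPT4Disabled)
// keeps only "gpt-4o-mini" and "gpt-3.5-turbo".
```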
@@ -40,6 +40,11 @@ export interface MultimodalContent {
  };
}

export interface MultimodalContentForAlibaba {
  text?: string;
  image?: string;
}

export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];

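`MultimodalContentForAlibaba` models DashScope's flatter part shape (`text` / `image`) as opposed to the OpenAI-style `MultimodalContent` (`type` plus `text`/`image_url`). The `preProcessImageContentForAlibabaDashScope` helper used by the Qwen client below is not shown in this diff; a hedged sketch of the kind of conversion it presumably performs (the real helper lives in the app utils and may also upload or normalize images):

```ts
// Sketch under assumptions: conversion from OpenAI-style parts to DashScope parts.
interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: { url: string };
}

interface MultimodalContentForAlibaba {
  text?: string;
  image?: string;
}

function toDashScopeParts(
  content: string | MultimodalContent[],
): string | MultimodalContentForAlibaba[] {
  if (typeof content === "string") return content;
  return content.map((part) =>
    part.type === "image_url"
      ? { image: part.image_url?.url }
      : { text: part.text },
  );
}
```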
@@ -1,12 +1,16 @@
"use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import {
  ApiPath,
  Alibaba,
  ALIBABA_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import {
  preProcessImageContentForAlibabaDashScope,
  streamWithThink,
} from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
@@ -14,15 +18,15 @@ import {
  LLMModel,
  SpeechOptions,
  MultimodalContent,
  MultimodalContentForAlibaba,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
  isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -90,11 +94,6 @@ export class QwenApi implements LLMApi {
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -103,6 +102,21 @@ export class QwenApi implements LLMApi {
      },
    };

    const visionModel = isVisionModel(options.config.model);

    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = (
        visionModel
          ? await preProcessImageContentForAlibabaDashScope(v.content)
          : v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : getMessageTextContent(v)
      ) as any;

      messages.push({ role: v.role, content });
    }

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      model: modelConfig.model,
@@ -122,134 +136,120 @@ export class QwenApi implements LLMApi {
    options.onController?.(controller);

    try {
      const chatPath = this.path(Alibaba.ChatPath);
      const headers = {
        ...getHeaders(),
        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
      };

      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: {
          ...getHeaders(),
          "X-DashScope-SSE": shouldStream ? "enable" : "disable",
        },
        headers: headers,
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let responseRes: Response;
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          headers,
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.output.choices as Array<{
              message: {
                content: string | null | MultimodalContentForAlibaba[];
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

        // animate response to make it looks smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }
            if (!choices?.length) return { isThinking: false, content: "" };

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animaion
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText, responseRes);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Alibaba] request response content type: ",
              contentType,
            );
            responseRes = res;

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            const tool_calls = choices[0]?.message?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.message?.reasoning_content;
            const content = choices[0]?.message?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
              return {
                isThinking: false,
                content: "",
              };
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.output.choices as Array<{
                message: { content: string };
              }>;
              const delta = choices[0]?.message?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: Array.isArray(content)
                  ? content.map((item) => item.text).join(",")
                  : content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          onclose() {
            finish();
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            requestPayload?.input?.messages?.splice(
              requestPayload?.input?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

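The Qwen client above replaces its hand-rolled `fetchEventSource` loop (manual response animation, onopen/onmessage/onclose handlers) with `streamWithThink`, whose per-chunk callback returns `{ isThinking, content }` so the UI can render `reasoning_content` separately from the final answer. A reduced sketch of that callback contract, detached from the NextChat stores; the `ParsedChunk` name and the simplified payload shape are ours, based on the handler shown in the hunk:

```ts
// Sketch: the value a streamWithThink-style parseSSE callback is expected to return.
// DashScope responses put choices under `output`, unlike the OpenAI-compatible clients.
interface ParsedChunk {
  isThinking: boolean;
  content: string;
}

function parseDashScopeChunk(raw: string): ParsedChunk {
  const json = JSON.parse(raw);
  const message = json?.output?.choices?.[0]?.message;
  const reasoning: string | null = message?.reasoning_content ?? null;
  const content = message?.content ?? null;

  if (reasoning && reasoning.length > 0) {
    // Reasoning tokens are surfaced as "thinking" so the UI can fold them away.
    return { isThinking: true, content: reasoning };
  }
  if (content && content.length > 0) {
    // Multimodal chunks arrive as an array of { text } parts.
    return {
      isThinking: false,
      content: Array.isArray(content)
        ? content.map((item: { text?: string }) => item.text).join(",")
        : content,
    };
  }
  // Empty chunks are skipped by returning empty content.
  return { isThinking: false, content: "" };
}
```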
@@ -1,10 +1,5 @@
"use client";
import {
  ApiPath,
  Baidu,
  BAIDU_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";

@@ -23,7 +18,7 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

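Every client in this compare swaps a hard-coded `REQUEST_TIMEOUT_MS` (or the per-model ternaries seen later in the DeepSeek, Google, and OpenAI hunks) for a single `getTimeoutMSByModel(options.config.model)` helper from the app utils. Its implementation is not part of this diff; a plausible sketch, assuming it simply grants reasoning- and image-generation-style models the longer thinking budget, with illustrative constant values:

```ts
// Hypothetical sketch of getTimeoutMSByModel; the real helper's model checks
// and timeout values may differ.
const REQUEST_TIMEOUT_MS = 60_000;
const REQUEST_TIMEOUT_MS_FOR_THINKING = 5 * 60_000;

function getTimeoutMSByModel(model: string): number {
  const m = model.toLowerCase();
  const needsLongTimeout =
    m.startsWith("o1") ||
    m.startsWith("o3") ||
    m.startsWith("dall-e") ||
    m.includes("deepseek-r") ||
    m.includes("-thinking");
  return needsLongTimeout ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS;
}

// Usage mirrors the hunks above:
// const requestTimeoutId = setTimeout(() => controller.abort(), getTimeoutMSByModel(model));
```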
@@ -1,11 +1,12 @@
"use client";
import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
import {
  ApiPath,
  ByteDance,
  BYTEDANCE_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";

import {
  ChatOptions,
@@ -15,14 +16,14 @@ import {
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";

import { streamWithThink } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { preProcessImageContent } from "@/app/utils/chat";
import {
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -34,7 +35,7 @@ export interface OpenAIListModelResponse {
  }>;
}

interface RequestPayload {
interface RequestPayloadForByteDance {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
@@ -86,7 +87,10 @@ export class DoubaoApi implements LLMApi {
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      const content =
        v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }

@@ -99,7 +103,7 @@ export class DoubaoApi implements LLMApi {
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
    const requestPayload: RequestPayloadForByteDance = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
@@ -124,119 +128,101 @@ export class DoubaoApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let responseRes: Response;
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

        // animate response to make it looks smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animaion
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText, responseRes);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[ByteDance] request response content type: ",
              contentType,
            );
            responseRes = res;
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
              return {
                isThinking: false,
                content: "",
              };
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          onclose() {
            finish();
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayloadForByteDance,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            requestPayload?.messages?.splice(
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

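Both the ByteDance and Alibaba hunks start feeding assistant history through `getMessageTextContentWithoutThinking`, so previously streamed reasoning is not echoed back to the model on the next turn. The helper's body is not in this diff; a minimal sketch, assuming reasoning is delimited by `<think>...</think>` tags in the stored message text (the real helper may use a different convention):

```ts
// Sketch only: strips inline reasoning segments from an assistant message.
interface StoredMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

function getMessageTextContentWithoutThinking(msg: StoredMessage): string {
  return msg.content
    .replace(/<think>[\s\S]*?<\/think>/g, "") // drop reasoning blocks
    .trim();
}

// Example: "<think>checking units</think>42 km" becomes "42 km".
```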
@@ -1,12 +1,6 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
  DEEPSEEK_BASE_URL,
  DeepSeek,
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
@@ -80,6 +75,25 @@ export class DeepSeekApi implements LLMApi {
      }
    }

    // 检测并修复消息顺序,确保除system外的第一个消息是user
    const filteredMessages: ChatOptions["messages"] = [];
    let hasFoundFirstUser = false;

    for (const msg of messages) {
      if (msg.role === "system") {
        // Keep all system messages
        filteredMessages.push(msg);
      } else if (msg.role === "user") {
        // User message directly added
        filteredMessages.push(msg);
        hasFoundFirstUser = true;
      } else if (hasFoundFirstUser) {
        // After finding the first user message, all subsequent non-system messages are retained.
        filteredMessages.push(msg);
      }
      // If hasFoundFirstUser is false and it is not a system message, it will be skipped.
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -90,7 +104,7 @@ export class DeepSeekApi implements LLMApi {
    };

    const requestPayload: RequestPayload = {
      messages,
      messages: filteredMessages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
@@ -116,16 +130,10 @@ export class DeepSeekApi implements LLMApi {
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      const isR1 =
        options.config.model.endsWith("-reasoner") ||
        options.config.model.endsWith("-r1");

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
@@ -176,8 +184,8 @@ export class DeepSeekApi implements LLMApi {

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.trim().length === 0) &&
              (!content || content.trim().length === 0)
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
@@ -185,12 +193,12 @@ export class DeepSeekApi implements LLMApi {
              };
            }

            if (reasoning && reasoning.trim().length > 0) {
            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.trim().length > 0) {
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,

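The DeepSeek hunk adds a normalization pass (the Chinese comment reads: detect and fix message order so that, apart from system messages, the first message is from the user), since a conversation that opens with an assistant turn is rejected by the API. The same logic, extracted as a standalone pure function for clarity; the `ensureFirstNonSystemIsUser` name is ours:

```ts
// Same behaviour as the filteredMessages loop in the hunk above.
type Role = "system" | "user" | "assistant";
interface Msg {
  role: Role;
  content: unknown;
}

function ensureFirstNonSystemIsUser<T extends Msg>(messages: T[]): T[] {
  const result: T[] = [];
  let hasFoundFirstUser = false;
  for (const msg of messages) {
    if (msg.role === "system") {
      result.push(msg); // system messages are always kept
    } else if (msg.role === "user") {
      result.push(msg);
      hasFoundFirstUser = true; // from here on, assistant turns are allowed
    } else if (hasFoundFirstUser) {
      result.push(msg);
    }
    // leading assistant messages (before any user turn) are dropped
  }
  return result;
}
```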
@@ -1,10 +1,5 @@
"use client";
import {
  ApiPath,
  CHATGLM_BASE_URL,
  ChatGLM,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
@@ -21,7 +16,11 @@ import {
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import {
  getMessageTextContent,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { preProcessImageContent } from "@/app/utils/chat";
@@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi {

      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (modelType === "image" || modelType === "video") {

@@ -1,9 +1,4 @@
import {
  ApiPath,
  Google,
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import { ApiPath, Google } from "@/app/constant";
import {
  ChatOptions,
  getHeaders,
@@ -27,6 +22,7 @@ import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { nanoid } from "nanoid";
@@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

@@ -8,7 +8,6 @@ import {
  Azure,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
  ChatMessageTool,
@@ -22,7 +21,7 @@ import {
  preProcessImageContent,
  uploadImage,
  base64Image2Blob,
  stream,
  streamWithThink,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
@@ -42,6 +41,7 @@ import {
  getMessageTextContent,
  isVisionModel,
  isDalle3 as _isDalle3,
  getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {

export interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    role: "developer" | "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
@@ -237,8 +237,16 @@ export class ChatGPTApi implements LLMApi {
        // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
      };

      // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
      if (isO1OrO3) {
        // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
        // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
        // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
        requestPayload["messages"].unshift({
          role: "developer",
          content: "Formatting re-enabled",
        });

        // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
      }

@@ -294,7 +302,7 @@ export class ChatGPTApi implements LLMApi {
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        // console.log("getAsTools", tools, funcs);
        stream(
        streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
@@ -309,8 +317,12 @@ export class ChatGPTApi implements LLMApi {
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const id = tool_calls[0]?.id;
@@ -330,7 +342,37 @@ export class ChatGPTApi implements LLMApi {
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;

            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
@@ -362,9 +404,7 @@ export class ChatGPTApi implements LLMApi {
        // make a fetch request
        const requestTimeoutId = setTimeout(
          () => controller.abort(),
          isDalle3 || isO1OrO3
            ? REQUEST_TIMEOUT_MS_FOR_THINKING
            : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
          getTimeoutMSByModel(options.config.model),
        );

        const res = await fetch(chatPath, chatPayload);

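For o1/o3 requests the OpenAI client now prepends a `developer` message reading "Formatting re-enabled" (per the linked Azure guidance, these models otherwise tend to drop markdown from their output) and moves the token limit to `max_completion_tokens`. A condensed sketch of that payload adjustment, with a `RequestPayload` trimmed to the fields the hunk shows:

```ts
// Condensed from the hunk above; the field set is simplified for the sketch.
interface RequestPayload {
  messages: {
    role: "developer" | "system" | "user" | "assistant";
    content: string;
  }[];
  model: string;
  max_completion_tokens?: number;
}

function adjustForReasoningModels(
  payload: RequestPayload,
  isO1OrO3: boolean,
  maxTokens: number,
): RequestPayload {
  if (!isO1OrO3) return payload;
  return {
    ...payload,
    // Encourage markdown in responses; o1/o3 suppress it by default.
    messages: [
      { role: "developer", content: "Formatting re-enabled" },
      ...payload.messages,
    ],
    // Reasoning models take max_completion_tokens instead of max_tokens.
    max_completion_tokens: maxTokens,
  };
}
```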
@@ -4,7 +4,7 @@ import {
  ApiPath,
  SILICONFLOW_BASE_URL,
  SiliconFlow,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
  DEFAULT_MODELS,
} from "@/app/constant";
import {
  useAccessStore,
@@ -13,7 +13,7 @@ import {
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
@@ -25,12 +25,23 @@ import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";

import { fetch } from "@/app/utils/stream";
export interface SiliconFlowListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

export class SiliconflowApi implements LLMApi {
  private disableListModels = true;
  private disableListModels = false;

  path(path: string): string {
    const accessStore = useAccessStore.getState();
@@ -71,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }
@@ -123,7 +137,7 @@ export class SiliconflowApi implements LLMApi {
      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS_FOR_THINKING,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
@@ -238,6 +252,36 @@ export class SiliconflowApi implements LLMApi {
  }

  async models(): Promise<LLMModel[]> {
    return [];
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as SiliconFlowListModelResponse;
    const chatModels = resJson.data;
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    let seq = 1000; //同 Constant.ts 中的排序保持一致
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "siliconflow",
        providerName: "SiliconFlow",
        providerType: "siliconflow",
        sorted: 14,
      },
    }));
  }
}

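With `disableListModels` flipped to `false`, the SiliconFlow client now pulls its model catalogue from the provider's list endpoint instead of returning an empty array, assigning sort order starting at 1000 (the inline comment says this keeps the ordering consistent with the constants file). A small sketch of the same response-to-model mapping in isolation, with types trimmed to the fields the hunk uses:

```ts
// Sketch of the mapping performed in models() above, detached from the fetch call.
interface SiliconFlowListModelResponse {
  object: string;
  data: Array<{ id: string; object: string; root: string }>;
}

interface LLMModelLite {
  name: string;
  available: boolean;
  sorted: number;
  provider: { id: string; providerName: string; providerType: string; sorted: number };
}

function toLLMModels(resJson: SiliconFlowListModelResponse): LLMModelLite[] {
  let seq = 1000; // keep ordering consistent with the statically configured models
  return (resJson.data ?? []).map((m) => ({
    name: m.id,
    available: true,
    sorted: seq++,
    provider: {
      id: "siliconflow",
      providerName: "SiliconFlow",
      providerType: "siliconflow",
      sorted: 14,
    },
  }));
}
```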
@@ -1,5 +1,5 @@
"use client";
import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {
@@ -17,7 +17,11 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import {
  getMessageTextContent,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray";
@@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

@@ -1,6 +1,6 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
@@ -17,6 +17,7 @@ import {
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getTimeoutMSByModel } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconGemma;
     } else if (modelName.startsWith("claude")) {
       LlmIcon = BotIconClaude;
-    } else if (modelName.startsWith("llama")) {
+    } else if (modelName.includes("llama")) {
       LlmIcon = BotIconMeta;
-    } else if (modelName.startsWith("mixtral")) {
+    } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
       LlmIcon = BotIconMistral;
-    } else if (modelName.startsWith("deepseek")) {
+    } else if (modelName.includes("deepseek")) {
       LlmIcon = BotIconDeepseek;
     } else if (modelName.startsWith("moonshot")) {
       LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
       LlmIcon = BotIconDoubao;
     } else if (
-      modelName.startsWith("glm") ||
+      modelName.includes("glm") ||
       modelName.startsWith("cogview-") ||
       modelName.startsWith("cogvideox-")
     ) {

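The avatar hunks above loosen the model-to-icon matching: llama, deepseek and glm ids are now matched by substring rather than prefix, and codestral is grouped with the Mistral icon. A minimal sketch of the practical difference, using illustrative model ids (the `modelName` value is assumed to be lower-cased before these checks, which the diff does not show):

// Example (not part of the diff): prefix vs. substring matching.
const byPrefix = (name: string) => name.startsWith("deepseek");
const bySubstring = (name: string) => name.includes("deepseek");

byPrefix("deepseek-r1");                    // true either way
byPrefix("pro/deepseek-ai/deepseek-v3");    // false -> used to fall back to the default icon
bySubstring("pro/deepseek-ai/deepseek-v3"); // true  -> now resolves to the DeepSeek icon
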
@@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import ChatGptIcon from "../icons/chatgpt.png";
 import ShareIcon from "../icons/share.svg";
-import BotIcon from "../icons/bot.png";
 
 import DownloadIcon from "../icons/download.svg";
 import { useEffect, useMemo, useRef, useState } from "react";
@@ -33,13 +32,13 @@ import dynamic from "next/dynamic";
 import NextImage from "next/image";
 
 import { toBlob, toPng } from "html-to-image";
-import { DEFAULT_MASK_AVATAR } from "../store/mask";
 
 import { prettyObject } from "../utils/format";
 import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
 import { getClientConfig } from "../config/client";
 import { type ClientApi, getClientApi } from "../client/api";
 import { getMessageTextContent } from "../utils";
+import { MaskAvatar } from "./mask";
 import clsx from "clsx";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
@@ -407,22 +406,6 @@ export function PreviewActions(props: {
   );
 }
 
-function ExportAvatar(props: { avatar: string }) {
-  if (props.avatar === DEFAULT_MASK_AVATAR) {
-    return (
-      <img
-        src={BotIcon.src}
-        width={30}
-        height={30}
-        alt="bot"
-        className="user-avatar"
-      />
-    );
-  }
-
-  return <Avatar avatar={props.avatar} />;
-}
-
 export function ImagePreviewer(props: {
   messages: ChatMessage[];
   topic: string;
@@ -546,9 +529,12 @@ export function ImagePreviewer(props: {
               github.com/ChatGPTNextWeb/ChatGPT-Next-Web
             </div>
             <div className={styles["icons"]}>
-              <ExportAvatar avatar={config.avatar} />
+              <MaskAvatar avatar={config.avatar} />
               <span className={styles["icon-space"]}>&</span>
-              <ExportAvatar avatar={mask.avatar} />
+              <MaskAvatar
+                avatar={mask.avatar}
+                model={session.mask.modelConfig.model}
+              />
             </div>
           </div>
           <div>
@@ -576,9 +562,14 @@ export function ImagePreviewer(props: {
               key={i}
             >
               <div className={styles["avatar"]}>
-                <ExportAvatar
-                  avatar={m.role === "user" ? config.avatar : mask.avatar}
-                />
+                {m.role === "user" ? (
+                  <Avatar avatar={config.avatar}></Avatar>
+                ) : (
+                  <MaskAvatar
+                    avatar={session.mask.avatar}
+                    model={m.model || session.mask.modelConfig.model}
+                  />
+                )}
               </div>
 
               <div className={styles["body"]}>

@@ -221,7 +221,12 @@ export const ByteDance = {
 
 export const Alibaba = {
   ExampleEndpoint: ALIBABA_BASE_URL,
-  ChatPath: "v1/services/aigc/text-generation/generation",
+  ChatPath: (modelName: string) => {
+    if (modelName.includes("vl") || modelName.includes("omni")) {
+      return "v1/services/aigc/multimodal-generation/generation";
+    }
+    return `v1/services/aigc/text-generation/generation`;
+  },
 };
 
 export const Tencent = {
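
With this hunk `Alibaba.ChatPath` becomes a function of the model name instead of a fixed string: ids containing "vl" or "omni" are routed to DashScope's multimodal-generation endpoint, everything else keeps the text-generation path. A minimal usage sketch, assuming a caller that already holds the model id (`qwen-vl-plus` and `qwen-omni-turbo` are among the ids added to `alibabaModes` further down):

// Example (not part of the diff): resolving the chat path per model.
import { Alibaba } from "@/app/constant";

Alibaba.ChatPath("qwen-vl-plus");    // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-omni-turbo"); // same multimodal endpoint
Alibaba.ChatPath("qwen-max");        // "v1/services/aigc/text-generation/generation"
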
@@ -258,6 +263,7 @@ export const ChatGLM = {
 export const SiliconFlow = {
   ExampleEndpoint: SILICONFLOW_BASE_URL,
   ChatPath: "v1/chat/completions",
+  ListModelPath: "v1/models?&sub_type=chat",
 };
 
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -462,6 +468,7 @@ export const VISION_MODEL_REGEXES = [
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
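
The `/vl/i` entry added to `VISION_MODEL_REGEXES` above makes any model id containing "vl" (for example the qwen-vl ids added a few hunks below) count as vision-capable, while `EXCLUDE_VISION_MODEL_REGEXES` still vetoes specific ids. A hedged sketch of how the two lists are presumably combined; the real `isVisionModel` in app/utils.ts is not part of this diff and additionally honours a VISION_MODELS env var (see the test changes at the end):

// Example (not part of the diff): one plausible combination of the two regex lists.
import {
  VISION_MODEL_REGEXES,
  EXCLUDE_VISION_MODEL_REGEXES,
} from "@/app/constant";

function looksLikeVisionModel(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}

looksLikeVisionModel("qwen-vl-max");               // true, via the new /vl/i rule
looksLikeVisionModel("claude-3-5-haiku-20241022"); // false, in the exclude list
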
@@ -533,6 +540,8 @@ const anthropicModels = [
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
+  "claude-3-7-sonnet-20250219",
+  "claude-3-7-sonnet-latest",
 ];
 
 const baiduModels = [
@@ -566,6 +575,9 @@ const alibabaModes = [
   "qwen-max-0403",
   "qwen-max-0107",
   "qwen-max-longcontext",
+  "qwen-omni-turbo",
+  "qwen-vl-plus",
+  "qwen-vl-max",
 ];
 
 const tencentModels = [
@@ -814,5 +826,5 @@ export const internalAllowedWebDavEndpoints = [
 
 export const DEFAULT_GA_ID = "G-89WN60ZK2E";
 
-export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
-export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
+export const SAAS_CHAT_URL = "https://nextchat.club";
+export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";

							
								
								
									
832 app/locales/da.ts (new file)
@@ -0,0 +1,832 @@
import { getClientConfig } from "../config/client";
 | 
			
		||||
import { SubmitKey } from "../store/config";
 | 
			
		||||
import { SAAS_CHAT_UTM_URL } from "@/app/constant";
 | 
			
		||||
import { PartialLocaleType } from "./index";
 | 
			
		||||
 | 
			
		||||
const isApp = !!getClientConfig()?.isApp;
 | 
			
		||||
const da: PartialLocaleType = {
 | 
			
		||||
  WIP: "Der kommer snart mere...",
 | 
			
		||||
  Error: {
 | 
			
		||||
    Unauthorized: isApp
 | 
			
		||||
      ? `Hov, der skete en fejl. Sådan kan du komme videre:
 | 
			
		||||
       \\ 1️⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL})
 | 
			
		||||
       \\ 2️⃣ Vil du bruge dine egne OpenAI-nøgler? [Tryk her](/#/settings) for at ændre indstillinger ⚙️`
 | 
			
		||||
      : `Hov, der skete en fejl. Lad os løse det:
 | 
			
		||||
       \\ 1️⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL})
 | 
			
		||||
       \\ 2️⃣ Bruger du en privat opsætning? [Tryk her](/#/auth) for at taste din nøgle 🔑
 | 
			
		||||
       \\ 3️⃣ Vil du bruge dine egne OpenAI-nøgler? [Tryk her](/#/settings) for at ændre indstillinger ⚙️
 | 
			
		||||
       `,
 | 
			
		||||
  },
 | 
			
		||||
  Auth: {
 | 
			
		||||
    Return: "Tilbage",
 | 
			
		||||
    Title: "Adgangskode",
 | 
			
		||||
    Tips: "Skriv venligst koden herunder",
 | 
			
		||||
    SubTips: "Eller brug din egen OpenAI- eller Google-nøgle",
 | 
			
		||||
    Input: "Adgangskode",
 | 
			
		||||
    Confirm: "OK",
 | 
			
		||||
    Later: "Senere",
 | 
			
		||||
    SaasTips: "Hvis det er for svært, kan du starte nu",
 | 
			
		||||
  },
 | 
			
		||||
  ChatItem: {
 | 
			
		||||
    ChatItemCount: (count: number) => `${count} beskeder`,
 | 
			
		||||
  },
 | 
			
		||||
  Chat: {
 | 
			
		||||
    SubTitle: (count: number) => `${count} beskeder`,
 | 
			
		||||
    EditMessage: {
 | 
			
		||||
      Title: "Rediger beskeder",
 | 
			
		||||
      Topic: {
 | 
			
		||||
        Title: "Emne",
 | 
			
		||||
        SubTitle: "Skift emne for denne chat",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
    Actions: {
 | 
			
		||||
      ChatList: "Gå til chatliste",
 | 
			
		||||
      CompressedHistory: "Komprimeret historie",
 | 
			
		||||
      Export: "Eksporter alle beskeder som Markdown",
 | 
			
		||||
      Copy: "Kopiér",
 | 
			
		||||
      Stop: "Stop",
 | 
			
		||||
      Retry: "Prøv igen",
 | 
			
		||||
      Pin: "Fastgør",
 | 
			
		||||
      PinToastContent: "1 besked er nu fastgjort",
 | 
			
		||||
      PinToastAction: "Se",
 | 
			
		||||
      Delete: "Slet",
 | 
			
		||||
      Edit: "Rediger",
 | 
			
		||||
      FullScreen: "Fuld skærm",
 | 
			
		||||
      RefreshTitle: "Opdatér titel",
 | 
			
		||||
      RefreshToast: "Anmodning om ny titel sendt",
 | 
			
		||||
      Speech: "Afspil",
 | 
			
		||||
      StopSpeech: "Stop",
 | 
			
		||||
    },
 | 
			
		||||
    Commands: {
 | 
			
		||||
      new: "Ny chat",
 | 
			
		||||
      newm: "Ny chat med persona",
 | 
			
		||||
      next: "Næste chat",
 | 
			
		||||
      prev: "Forrige chat",
 | 
			
		||||
      clear: "Ryd alt før",
 | 
			
		||||
      fork: "Kopiér chat",
 | 
			
		||||
      del: "Slet chat",
 | 
			
		||||
    },
 | 
			
		||||
    InputActions: {
 | 
			
		||||
      Stop: "Stop",
 | 
			
		||||
      ToBottom: "Ned til nyeste",
 | 
			
		||||
      Theme: {
 | 
			
		||||
        auto: "Automatisk",
 | 
			
		||||
        light: "Lyst tema",
 | 
			
		||||
        dark: "Mørkt tema",
 | 
			
		||||
      },
 | 
			
		||||
      Prompt: "Prompts",
 | 
			
		||||
      Masks: "Personaer",
 | 
			
		||||
      Clear: "Ryd kontekst",
 | 
			
		||||
      Settings: "Indstillinger",
 | 
			
		||||
      UploadImage: "Upload billeder",
 | 
			
		||||
    },
 | 
			
		||||
    Rename: "Omdøb chat",
 | 
			
		||||
    Typing: "Skriver…",
 | 
			
		||||
    Input: (submitKey: string) => {
 | 
			
		||||
      let inputHints = `${submitKey} for at sende`;
 | 
			
		||||
      if (submitKey === String(SubmitKey.Enter)) {
 | 
			
		||||
        inputHints += ", Shift + Enter for ny linje";
 | 
			
		||||
      }
 | 
			
		||||
      return (
 | 
			
		||||
        inputHints + ", / for at søge i prompts, : for at bruge kommandoer"
 | 
			
		||||
      );
 | 
			
		||||
    },
 | 
			
		||||
    Send: "Send",
 | 
			
		||||
    StartSpeak: "Start oplæsning",
 | 
			
		||||
    StopSpeak: "Stop oplæsning",
 | 
			
		||||
    Config: {
 | 
			
		||||
      Reset: "Nulstil til standard",
 | 
			
		||||
      SaveAs: "Gem som persona",
 | 
			
		||||
    },
 | 
			
		||||
    IsContext: "Ekstra prompt til baggrund",
 | 
			
		||||
    ShortcutKey: {
 | 
			
		||||
      Title: "Hurtigtaster",
 | 
			
		||||
      newChat: "Åbn ny chat",
 | 
			
		||||
      focusInput: "Fokus på tekstfeltet",
 | 
			
		||||
      copyLastMessage: "Kopiér sidste svar",
 | 
			
		||||
      copyLastCode: "Kopiér sidste kodeblok",
 | 
			
		||||
      showShortcutKey: "Vis hurtigtaster",
 | 
			
		||||
      clearContext: "Ryd kontekst",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Export: {
 | 
			
		||||
    Title: "Eksportér beskeder",
 | 
			
		||||
    Copy: "Kopiér alt",
 | 
			
		||||
    Download: "Download",
 | 
			
		||||
    MessageFromYou: "Fra dig",
 | 
			
		||||
    MessageFromChatGPT: "Fra ChatGPT",
 | 
			
		||||
    Share: "Del til ShareGPT",
 | 
			
		||||
    Format: {
 | 
			
		||||
      Title: "Filformat",
 | 
			
		||||
      SubTitle: "Vælg enten Markdown eller PNG-billede",
 | 
			
		||||
    },
 | 
			
		||||
    IncludeContext: {
 | 
			
		||||
      Title: "Tag baggrund med",
 | 
			
		||||
      SubTitle: "Skal ekstra baggrund (persona) med i eksporten?",
 | 
			
		||||
    },
 | 
			
		||||
    Steps: {
 | 
			
		||||
      Select: "Vælg",
 | 
			
		||||
      Preview: "Forhåndsvis",
 | 
			
		||||
    },
 | 
			
		||||
    Image: {
 | 
			
		||||
      Toast: "Laver billede...",
 | 
			
		||||
      Modal: "Tryk længe eller højreklik for at gemme",
 | 
			
		||||
    },
 | 
			
		||||
    Artifacts: {
 | 
			
		||||
      Title: "Del side",
 | 
			
		||||
      Error: "Fejl ved deling",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Select: {
 | 
			
		||||
    Search: "Søg",
 | 
			
		||||
    All: "Vælg alle",
 | 
			
		||||
    Latest: "Vælg nyeste",
 | 
			
		||||
    Clear: "Ryd alt",
 | 
			
		||||
  },
 | 
			
		||||
  Memory: {
 | 
			
		||||
    Title: "Huskesætning",
 | 
			
		||||
    EmptyContent: "Ingenting lige nu.",
 | 
			
		||||
    Send: "Send huskesætning",
 | 
			
		||||
    Copy: "Kopiér huskesætning",
 | 
			
		||||
    Reset: "Nulstil chat",
 | 
			
		||||
    ResetConfirm:
 | 
			
		||||
      "Dette sletter nuværende samtale og hukommelse. Er du sikker?",
 | 
			
		||||
  },
 | 
			
		||||
  Home: {
 | 
			
		||||
    NewChat: "Ny Chat",
 | 
			
		||||
    DeleteChat: "Vil du slette den valgte chat?",
 | 
			
		||||
    DeleteToast: "Chat slettet",
 | 
			
		||||
    Revert: "Fortryd",
 | 
			
		||||
  },
 | 
			
		||||
  Settings: {
 | 
			
		||||
    Title: "Indstillinger",
 | 
			
		||||
    SubTitle: "Alle indstillinger",
 | 
			
		||||
    ShowPassword: "Vis kodeord",
 | 
			
		||||
    Danger: {
 | 
			
		||||
      Reset: {
 | 
			
		||||
        Title: "Nulstil alle indstillinger",
 | 
			
		||||
        SubTitle: "Gendan alt til standard",
 | 
			
		||||
        Action: "Nulstil",
 | 
			
		||||
        Confirm: "Vil du virkelig nulstille alt?",
 | 
			
		||||
      },
 | 
			
		||||
      Clear: {
 | 
			
		||||
        Title: "Slet alle data",
 | 
			
		||||
        SubTitle: "Sletter alt om beskeder og indstillinger",
 | 
			
		||||
        Action: "Slet",
 | 
			
		||||
        Confirm: "Er du sikker på, at du vil slette alt?",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
    Lang: {
 | 
			
		||||
      Name: "Language",
 | 
			
		||||
      All: "Alle sprog",
 | 
			
		||||
    },
 | 
			
		||||
    Avatar: "Avatar",
 | 
			
		||||
    FontSize: {
 | 
			
		||||
      Title: "Skriftstørrelse",
 | 
			
		||||
      SubTitle: "Vælg, hvor stor teksten skal være",
 | 
			
		||||
    },
 | 
			
		||||
    FontFamily: {
 | 
			
		||||
      Title: "Skrifttype",
 | 
			
		||||
      SubTitle: "Hvis tom, bruger den standard skrifttype",
 | 
			
		||||
      Placeholder: "Skrifttype-navn",
 | 
			
		||||
    },
 | 
			
		||||
    InjectSystemPrompts: {
 | 
			
		||||
      Title: "Tilføj system-prompt",
 | 
			
		||||
      SubTitle: "Læg altid en ekstra prompt først i anmodninger",
 | 
			
		||||
    },
 | 
			
		||||
    InputTemplate: {
 | 
			
		||||
      Title: "Tekstskabelon",
 | 
			
		||||
      SubTitle: "Den seneste besked placeres i denne skabelon",
 | 
			
		||||
    },
 | 
			
		||||
    Update: {
 | 
			
		||||
      Version: (x: string) => `Version: ${x}`,
 | 
			
		||||
      IsLatest: "Du har nyeste version",
 | 
			
		||||
      CheckUpdate: "Tjek efter opdatering",
 | 
			
		||||
      IsChecking: "Tjekker...",
 | 
			
		||||
      FoundUpdate: (x: string) => `Ny version fundet: ${x}`,
 | 
			
		||||
      GoToUpdate: "Opdatér",
 | 
			
		||||
      Success: "Opdatering lykkedes.",
 | 
			
		||||
      Failed: "Opdatering mislykkedes.",
 | 
			
		||||
    },
 | 
			
		||||
    SendKey: "Tast for send",
 | 
			
		||||
    Theme: "Tema",
 | 
			
		||||
    TightBorder: "Stram kant",
 | 
			
		||||
    SendPreviewBubble: {
 | 
			
		||||
      Title: "Forhåndsvisnings-boble",
 | 
			
		||||
      SubTitle: "Vis tekst, før den sendes",
 | 
			
		||||
    },
 | 
			
		||||
    AutoGenerateTitle: {
 | 
			
		||||
      Title: "Lav titel automatisk",
 | 
			
		||||
      SubTitle: "Foreslå en titel ud fra chatten",
 | 
			
		||||
    },
 | 
			
		||||
    Sync: {
 | 
			
		||||
      CloudState: "Seneste opdatering",
 | 
			
		||||
      NotSyncYet: "Endnu ikke synkroniseret",
 | 
			
		||||
      Success: "Synkronisering lykkedes",
 | 
			
		||||
      Fail: "Synkronisering mislykkedes",
 | 
			
		||||
      Config: {
 | 
			
		||||
        Modal: {
 | 
			
		||||
          Title: "Indstil synk",
 | 
			
		||||
          Check: "Tjek forbindelse",
 | 
			
		||||
        },
 | 
			
		||||
        SyncType: {
 | 
			
		||||
          Title: "Synk-type",
 | 
			
		||||
          SubTitle: "Vælg en synk-tjeneste",
 | 
			
		||||
        },
 | 
			
		||||
        Proxy: {
 | 
			
		||||
          Title: "Aktivér proxy",
 | 
			
		||||
          SubTitle: "Brug proxy for at undgå netværksproblemer",
 | 
			
		||||
        },
 | 
			
		||||
        ProxyUrl: {
 | 
			
		||||
          Title: "Proxy-adresse",
 | 
			
		||||
          SubTitle: "Bruges kun til projektets egen proxy",
 | 
			
		||||
        },
 | 
			
		||||
        WebDav: {
 | 
			
		||||
          Endpoint: "WebDAV-adresse",
 | 
			
		||||
          UserName: "Brugernavn",
 | 
			
		||||
          Password: "Kodeord",
 | 
			
		||||
        },
 | 
			
		||||
        UpStash: {
 | 
			
		||||
          Endpoint: "UpStash Redis REST URL",
 | 
			
		||||
          UserName: "Backup-navn",
 | 
			
		||||
          Password: "UpStash Redis REST Token",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      LocalState: "Lokale data",
 | 
			
		||||
      Overview: (overview: any) =>
 | 
			
		||||
        `${overview.chat} chats, ${overview.message} beskeder, ${overview.prompt} prompts, ${overview.mask} personaer`,
 | 
			
		||||
      ImportFailed: "Import mislykkedes",
 | 
			
		||||
    },
 | 
			
		||||
    Mask: {
 | 
			
		||||
      Splash: {
 | 
			
		||||
        Title: "Persona-forside",
 | 
			
		||||
        SubTitle: "Vis denne side, når du opretter ny chat",
 | 
			
		||||
      },
 | 
			
		||||
      Builtin: {
 | 
			
		||||
        Title: "Skjul indbyggede personaer",
 | 
			
		||||
        SubTitle: "Vis ikke de indbyggede personaer i listen",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
    Prompt: {
 | 
			
		||||
      Disable: {
 | 
			
		||||
        Title: "Slå auto-forslag fra",
 | 
			
		||||
        SubTitle: "Tast / for at få forslag",
 | 
			
		||||
      },
 | 
			
		||||
      List: "Prompt-liste",
 | 
			
		||||
      ListCount: (builtin: number, custom: number) =>
 | 
			
		||||
        `${builtin} indbygget, ${custom} brugerdefineret`,
 | 
			
		||||
      Edit: "Rediger",
 | 
			
		||||
      Modal: {
 | 
			
		||||
        Title: "Prompt-liste",
 | 
			
		||||
        Add: "Tilføj",
 | 
			
		||||
        Search: "Søg prompts",
 | 
			
		||||
      },
 | 
			
		||||
      EditModal: {
 | 
			
		||||
        Title: "Rediger prompt",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
    HistoryCount: {
 | 
			
		||||
      Title: "Antal beskeder, der følger med",
 | 
			
		||||
      SubTitle: "Hvor mange af de tidligere beskeder, der sendes hver gang",
 | 
			
		||||
    },
 | 
			
		||||
    CompressThreshold: {
 | 
			
		||||
      Title: "Komprimeringsgrænse",
 | 
			
		||||
      SubTitle:
 | 
			
		||||
        "Hvis chatten bliver for lang, vil den komprimeres efter dette antal tegn",
 | 
			
		||||
    },
 | 
			
		||||
    Usage: {
 | 
			
		||||
      Title: "Brug og saldo",
 | 
			
		||||
      SubTitle(used: any, total: any) {
 | 
			
		||||
        return `Du har brugt $${used} i denne måned, og din grænse er $${total}.`;
 | 
			
		||||
      },
 | 
			
		||||
      IsChecking: "Tjekker...",
 | 
			
		||||
      Check: "Tjek igen",
 | 
			
		||||
      NoAccess: "Indtast API-nøgle for at se forbrug",
 | 
			
		||||
    },
 | 
			
		||||
    Access: {
 | 
			
		||||
      AccessCode: {
 | 
			
		||||
        Title: "Adgangskode",
 | 
			
		||||
        SubTitle: "Adgangskontrol er slået til",
 | 
			
		||||
        Placeholder: "Skriv kode her",
 | 
			
		||||
      },
 | 
			
		||||
      CustomEndpoint: {
 | 
			
		||||
        Title: "Brugerdefineret adresse",
 | 
			
		||||
        SubTitle: "Brug Azure eller OpenAI fra egen server",
 | 
			
		||||
      },
 | 
			
		||||
      Provider: {
 | 
			
		||||
        Title: "Model-udbyder",
 | 
			
		||||
        SubTitle: "Vælg Azure eller OpenAI",
 | 
			
		||||
      },
 | 
			
		||||
      OpenAI: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "OpenAI API-nøgle",
 | 
			
		||||
          SubTitle: "Brug din egen nøgle",
 | 
			
		||||
          Placeholder: "sk-xxx",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "OpenAI Endpoint",
 | 
			
		||||
          SubTitle: "Skal starte med http(s):// eller /api/openai som standard",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Azure: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Azure Api Key",
 | 
			
		||||
          SubTitle: "Hent din nøgle fra Azure-portalen",
 | 
			
		||||
          Placeholder: "Azure Api Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Azure Endpoint",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
        ApiVerion: {
 | 
			
		||||
          Title: "Azure Api Version",
 | 
			
		||||
          SubTitle: "Hentet fra Azure-portalen",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Anthropic: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Anthropic API-nøgle",
 | 
			
		||||
          SubTitle: "Brug din egen Anthropic-nøgle",
 | 
			
		||||
          Placeholder: "Anthropic API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Endpoint-adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
        ApiVerion: {
 | 
			
		||||
          Title: "API-version (Claude)",
 | 
			
		||||
          SubTitle: "Vælg den ønskede version",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Baidu: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Baidu-nøgle",
 | 
			
		||||
          SubTitle: "Din egen Baidu-nøgle",
 | 
			
		||||
          Placeholder: "Baidu API Key",
 | 
			
		||||
        },
 | 
			
		||||
        SecretKey: {
 | 
			
		||||
          Title: "Baidu hemmelig nøgle",
 | 
			
		||||
          SubTitle: "Din egen hemmelige nøgle fra Baidu",
 | 
			
		||||
          Placeholder: "Baidu Secret Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "Kan ikke ændres, se .env",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Tencent: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Tencent-nøgle",
 | 
			
		||||
          SubTitle: "Din egen nøgle fra Tencent",
 | 
			
		||||
          Placeholder: "Tencent API Key",
 | 
			
		||||
        },
 | 
			
		||||
        SecretKey: {
 | 
			
		||||
          Title: "Tencent hemmelig nøgle",
 | 
			
		||||
          SubTitle: "Din egen hemmelige nøgle fra Tencent",
 | 
			
		||||
          Placeholder: "Tencent Secret Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "Kan ikke ændres, se .env",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      ByteDance: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "ByteDance-nøgle",
 | 
			
		||||
          SubTitle: "Din egen nøgle til ByteDance",
 | 
			
		||||
          Placeholder: "ByteDance API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Alibaba: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Alibaba-nøgle",
 | 
			
		||||
          SubTitle: "Din egen Alibaba Cloud-nøgle",
 | 
			
		||||
          Placeholder: "Alibaba Cloud API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Moonshot: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Moonshot-nøgle",
 | 
			
		||||
          SubTitle: "Din egen Moonshot-nøgle",
 | 
			
		||||
          Placeholder: "Moonshot API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      DeepSeek: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "DeepSeek-nøgle",
 | 
			
		||||
          SubTitle: "Din egen DeepSeek-nøgle",
 | 
			
		||||
          Placeholder: "DeepSeek API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      XAI: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "XAI-nøgle",
 | 
			
		||||
          SubTitle: "Din egen XAI-nøgle",
 | 
			
		||||
          Placeholder: "XAI API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      ChatGLM: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "ChatGLM-nøgle",
 | 
			
		||||
          SubTitle: "Din egen ChatGLM-nøgle",
 | 
			
		||||
          Placeholder: "ChatGLM API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      SiliconFlow: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "SiliconFlow-nøgle",
 | 
			
		||||
          SubTitle: "Din egen SiliconFlow-nøgle",
 | 
			
		||||
          Placeholder: "SiliconFlow API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Stability: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Stability-nøgle",
 | 
			
		||||
          SubTitle: "Din egen Stability-nøgle",
 | 
			
		||||
          Placeholder: "Stability API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Iflytek: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Iflytek API Key",
 | 
			
		||||
          SubTitle: "Nøgle fra Iflytek",
 | 
			
		||||
          Placeholder: "Iflytek API Key",
 | 
			
		||||
        },
 | 
			
		||||
        ApiSecret: {
 | 
			
		||||
          Title: "Iflytek hemmelig nøgle",
 | 
			
		||||
          SubTitle: "Hentet fra Iflytek",
 | 
			
		||||
          Placeholder: "Iflytek API Secret",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      CustomModel: {
 | 
			
		||||
        Title: "Egne modelnavne",
 | 
			
		||||
        SubTitle: "Skriv komma-adskilte navne",
 | 
			
		||||
      },
 | 
			
		||||
      Google: {
 | 
			
		||||
        ApiKey: {
 | 
			
		||||
          Title: "Google-nøgle",
 | 
			
		||||
          SubTitle: "Få din nøgle hos Google AI",
 | 
			
		||||
          Placeholder: "Google AI API Key",
 | 
			
		||||
        },
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "F.eks.: ",
 | 
			
		||||
        },
 | 
			
		||||
        ApiVersion: {
 | 
			
		||||
          Title: "API-version (til gemini-pro)",
 | 
			
		||||
          SubTitle: "Vælg en bestemt version",
 | 
			
		||||
        },
 | 
			
		||||
        GoogleSafetySettings: {
 | 
			
		||||
          Title: "Google sikkerhedsindstillinger",
 | 
			
		||||
          SubTitle: "Vælg et niveau for indholdskontrol",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
    Model: "Model",
 | 
			
		||||
    CompressModel: {
 | 
			
		||||
      Title: "Opsummeringsmodel",
 | 
			
		||||
      SubTitle: "Bruges til at korte historik ned og lave titel",
 | 
			
		||||
    },
 | 
			
		||||
    Temperature: {
 | 
			
		||||
      Title: "Temperatur",
 | 
			
		||||
      SubTitle: "Jo højere tal, jo mere kreativt svar",
 | 
			
		||||
    },
 | 
			
		||||
    TopP: {
 | 
			
		||||
      Title: "Top P",
 | 
			
		||||
      SubTitle: "Skal ikke ændres sammen med temperatur",
 | 
			
		||||
    },
 | 
			
		||||
    MaxTokens: {
 | 
			
		||||
      Title: "Maks. længde",
 | 
			
		||||
      SubTitle: "Hvor mange tokens (ord/stykker tekst) der kan bruges",
 | 
			
		||||
    },
 | 
			
		||||
    PresencePenalty: {
 | 
			
		||||
      Title: "Nye emner",
 | 
			
		||||
      SubTitle: "Jo højere tal, jo mere nyt indhold",
 | 
			
		||||
    },
 | 
			
		||||
    FrequencyPenalty: {
 | 
			
		||||
      Title: "Gentagelsesstraf",
 | 
			
		||||
      SubTitle: "Jo højere tal, jo mindre gentagelse",
 | 
			
		||||
    },
 | 
			
		||||
    TTS: {
 | 
			
		||||
      Enable: {
 | 
			
		||||
        Title: "Tænd for oplæsning (TTS)",
 | 
			
		||||
        SubTitle: "Slå tekst-til-tale til",
 | 
			
		||||
      },
 | 
			
		||||
      Autoplay: {
 | 
			
		||||
        Title: "Automatisk oplæsning",
 | 
			
		||||
        SubTitle: "Laver lyd automatisk, hvis TTS er slået til",
 | 
			
		||||
      },
 | 
			
		||||
      Model: "Model",
 | 
			
		||||
      Voice: {
 | 
			
		||||
        Title: "Stemme",
 | 
			
		||||
        SubTitle: "Hvilken stemme der bruges til lyd",
 | 
			
		||||
      },
 | 
			
		||||
      Speed: {
 | 
			
		||||
        Title: "Hastighed",
 | 
			
		||||
        SubTitle: "Hvor hurtigt der oplæses",
 | 
			
		||||
      },
 | 
			
		||||
      Engine: "TTS-motor",
 | 
			
		||||
    },
 | 
			
		||||
    Realtime: {
 | 
			
		||||
      Enable: {
 | 
			
		||||
        Title: "Live-chat",
 | 
			
		||||
        SubTitle: "Slå live-svar til",
 | 
			
		||||
      },
 | 
			
		||||
      Provider: {
 | 
			
		||||
        Title: "Modeludbyder",
 | 
			
		||||
        SubTitle: "Vælg forskellig udbyder",
 | 
			
		||||
      },
 | 
			
		||||
      Model: {
 | 
			
		||||
        Title: "Model",
 | 
			
		||||
        SubTitle: "Vælg en model",
 | 
			
		||||
      },
 | 
			
		||||
      ApiKey: {
 | 
			
		||||
        Title: "API-nøgle",
 | 
			
		||||
        SubTitle: "Din nøgle",
 | 
			
		||||
        Placeholder: "API-nøgle",
 | 
			
		||||
      },
 | 
			
		||||
      Azure: {
 | 
			
		||||
        Endpoint: {
 | 
			
		||||
          Title: "Adresse",
 | 
			
		||||
          SubTitle: "Endpoint til Azure",
 | 
			
		||||
        },
 | 
			
		||||
        Deployment: {
 | 
			
		||||
          Title: "Udrulningsnavn",
 | 
			
		||||
          SubTitle: "Navn for dit Azure-setup",
 | 
			
		||||
        },
 | 
			
		||||
      },
 | 
			
		||||
      Temperature: {
 | 
			
		||||
        Title: "Temperatur",
 | 
			
		||||
        SubTitle: "Højere tal = mere varierede svar",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Store: {
 | 
			
		||||
    DefaultTopic: "Ny samtale",
 | 
			
		||||
    BotHello: "Hej! Hvordan kan jeg hjælpe dig i dag?",
 | 
			
		||||
    Error: "Noget gik galt. Prøv igen senere.",
 | 
			
		||||
    Prompt: {
 | 
			
		||||
      History: (content: string) =>
 | 
			
		||||
        "Her er et kort resume af, hvad vi har snakket om: " + content,
 | 
			
		||||
      Topic:
 | 
			
		||||
        "Find en kort overskrift med 4-5 ord om emnet. Ingen tegnsætning eller anførselstegn.",
 | 
			
		||||
      Summarize:
 | 
			
		||||
        "Skriv et kort resumé (under 200 ord) af vores samtale til senere brug.",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Copy: {
 | 
			
		||||
    Success: "Kopieret",
 | 
			
		||||
    Failed: "Kunne ikke kopiere. Giv adgang til udklipsholder.",
 | 
			
		||||
  },
 | 
			
		||||
  Download: {
 | 
			
		||||
    Success: "Filen er downloadet.",
 | 
			
		||||
    Failed: "Download fejlede.",
 | 
			
		||||
  },
 | 
			
		||||
  Context: {
 | 
			
		||||
    Toast: (x: any) => `Inkluderer ${x} ekstra prompts`,
 | 
			
		||||
    Edit: "Chatindstillinger",
 | 
			
		||||
    Add: "Tilføj prompt",
 | 
			
		||||
    Clear: "Kontekst ryddet",
 | 
			
		||||
    Revert: "Fortryd",
 | 
			
		||||
  },
 | 
			
		||||
  Discovery: {
 | 
			
		||||
    Name: "Søgning og plugins",
 | 
			
		||||
  },
 | 
			
		||||
  Mcp: {
 | 
			
		||||
    Name: "MCP",
 | 
			
		||||
  },
 | 
			
		||||
  FineTuned: {
 | 
			
		||||
    Sysmessage: "Du er en hjælper, der skal...",
 | 
			
		||||
  },
 | 
			
		||||
  SearchChat: {
 | 
			
		||||
    Name: "Søg",
 | 
			
		||||
    Page: {
 | 
			
		||||
      Title: "Søg i tidligere chats",
 | 
			
		||||
      Search: "Skriv her for at søge",
 | 
			
		||||
      NoResult: "Ingen resultater",
 | 
			
		||||
      NoData: "Ingen data",
 | 
			
		||||
      Loading: "Henter...",
 | 
			
		||||
      SubTitle: (count: number) => `Fandt ${count} resultater`,
 | 
			
		||||
    },
 | 
			
		||||
    Item: {
 | 
			
		||||
      View: "Vis",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Plugin: {
 | 
			
		||||
    Name: "Plugin",
 | 
			
		||||
    Page: {
 | 
			
		||||
      Title: "Plugins",
 | 
			
		||||
      SubTitle: (count: number) => `${count} plugins`,
 | 
			
		||||
      Search: "Søg plugin",
 | 
			
		||||
      Create: "Opret nyt",
 | 
			
		||||
      Find: "Du kan finde flere plugins på GitHub: ",
 | 
			
		||||
    },
 | 
			
		||||
    Item: {
 | 
			
		||||
      Info: (count: number) => `${count} metode`,
 | 
			
		||||
      View: "Vis",
 | 
			
		||||
      Edit: "Rediger",
 | 
			
		||||
      Delete: "Slet",
 | 
			
		||||
      DeleteConfirm: "Vil du slette?",
 | 
			
		||||
    },
 | 
			
		||||
    Auth: {
 | 
			
		||||
      None: "Ingen",
 | 
			
		||||
      Basic: "Basic",
 | 
			
		||||
      Bearer: "Bearer",
 | 
			
		||||
      Custom: "Tilpasset",
 | 
			
		||||
      CustomHeader: "Parameternavn",
 | 
			
		||||
      Token: "Token",
 | 
			
		||||
      Proxy: "Brug Proxy",
 | 
			
		||||
      ProxyDescription: "Løs CORS-problemer med Proxy",
 | 
			
		||||
      Location: "Sted",
 | 
			
		||||
      LocationHeader: "Header",
 | 
			
		||||
      LocationQuery: "Query",
 | 
			
		||||
      LocationBody: "Body",
 | 
			
		||||
    },
 | 
			
		||||
    EditModal: {
 | 
			
		||||
      Title: (readonly: boolean) =>
 | 
			
		||||
        `Rediger Plugin ${readonly ? "(skrivebeskyttet)" : ""}`,
 | 
			
		||||
      Download: "Download",
 | 
			
		||||
      Auth: "Godkendelsestype",
 | 
			
		||||
      Content: "OpenAPI Schema",
 | 
			
		||||
      Load: "Hent fra URL",
 | 
			
		||||
      Method: "Metode",
 | 
			
		||||
      Error: "Fejl i OpenAPI Schema",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Mask: {
 | 
			
		||||
    Name: "Persona",
 | 
			
		||||
    Page: {
 | 
			
		||||
      Title: "Prompts som personaer",
 | 
			
		||||
      SubTitle: (count: number) => `${count} skabeloner`,
 | 
			
		||||
      Search: "Søg skabeloner",
 | 
			
		||||
      Create: "Opret ny",
 | 
			
		||||
    },
 | 
			
		||||
    Item: {
 | 
			
		||||
      Info: (count: number) => `${count} prompts`,
 | 
			
		||||
      Chat: "Chat",
 | 
			
		||||
      View: "Vis",
 | 
			
		||||
      Edit: "Rediger",
 | 
			
		||||
      Delete: "Slet",
 | 
			
		||||
      DeleteConfirm: "Vil du slette?",
 | 
			
		||||
    },
 | 
			
		||||
    EditModal: {
 | 
			
		||||
      Title: (readonly: boolean) =>
 | 
			
		||||
        `Rediger skabelon ${readonly ? "(skrivebeskyttet)" : ""}`,
 | 
			
		||||
      Download: "Download",
 | 
			
		||||
      Clone: "Klon",
 | 
			
		||||
    },
 | 
			
		||||
    Config: {
 | 
			
		||||
      Avatar: "Chat-avatar",
 | 
			
		||||
      Name: "Chat-navn",
 | 
			
		||||
      Sync: {
 | 
			
		||||
        Title: "Brug globale indstillinger",
 | 
			
		||||
        SubTitle: "Gældende for denne chat",
 | 
			
		||||
        Confirm: "Erstat nuværende indstillinger med globale?",
 | 
			
		||||
      },
 | 
			
		||||
      HideContext: {
 | 
			
		||||
        Title: "Skjul ekstra prompts",
 | 
			
		||||
        SubTitle: "Vis dem ikke på chat-skærmen",
 | 
			
		||||
      },
 | 
			
		||||
      Artifacts: {
 | 
			
		||||
        Title: "Brug Artefakter",
 | 
			
		||||
        SubTitle: "Gør det muligt at vise HTML-sider",
 | 
			
		||||
      },
 | 
			
		||||
      CodeFold: {
 | 
			
		||||
        Title: "Fold kode sammen",
 | 
			
		||||
        SubTitle: "Luk/åbn lange kodestykker automatisk",
 | 
			
		||||
      },
 | 
			
		||||
      Share: {
 | 
			
		||||
        Title: "Del denne persona",
 | 
			
		||||
        SubTitle: "Få et link til denne skabelon",
 | 
			
		||||
        Action: "Kopiér link",
 | 
			
		||||
      },
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  NewChat: {
 | 
			
		||||
    Return: "Tilbage",
 | 
			
		||||
    Skip: "Start straks",
 | 
			
		||||
    Title: "Vælg en persona",
 | 
			
		||||
    SubTitle: "Chat med den persona, du vælger",
 | 
			
		||||
    More: "Se flere",
 | 
			
		||||
    NotShow: "Vis ikke igen",
 | 
			
		||||
    ConfirmNoShow:
 | 
			
		||||
      "Er du sikker på, at du ikke vil se det igen? Du kan altid slå det til under indstillinger.",
 | 
			
		||||
  },
 | 
			
		||||
  UI: {
 | 
			
		||||
    Confirm: "OK",
 | 
			
		||||
    Cancel: "Fortryd",
 | 
			
		||||
    Close: "Luk",
 | 
			
		||||
    Create: "Opret",
 | 
			
		||||
    Edit: "Rediger",
 | 
			
		||||
    Export: "Eksporter",
 | 
			
		||||
    Import: "Importér",
 | 
			
		||||
    Sync: "Synk",
 | 
			
		||||
    Config: "Konfigurer",
 | 
			
		||||
  },
 | 
			
		||||
  Exporter: {
 | 
			
		||||
    Description: {
 | 
			
		||||
      Title: "Kun beskeder efter sidste rydning vises",
 | 
			
		||||
    },
 | 
			
		||||
    Model: "Model",
 | 
			
		||||
    Messages: "Beskeder",
 | 
			
		||||
    Topic: "Emne",
 | 
			
		||||
    Time: "Tid",
 | 
			
		||||
  },
 | 
			
		||||
  URLCommand: {
 | 
			
		||||
    Code: "Så ud til, at der var en kode i linket. Vil du bruge den?",
 | 
			
		||||
    Settings: "Så ud til, at der var indstillinger i linket. Vil du bruge dem?",
 | 
			
		||||
  },
 | 
			
		||||
  SdPanel: {
 | 
			
		||||
    Prompt: "Prompt",
 | 
			
		||||
    NegativePrompt: "Negativ prompt",
 | 
			
		||||
    PleaseInput: (name: string) => `Indtast: ${name}`,
 | 
			
		||||
    AspectRatio: "Billedformat",
 | 
			
		||||
    ImageStyle: "Stil",
 | 
			
		||||
    OutFormat: "Uddataformat",
 | 
			
		||||
    AIModel: "AI-model",
 | 
			
		||||
    ModelVersion: "Version",
 | 
			
		||||
    Submit: "Send",
 | 
			
		||||
    ParamIsRequired: (name: string) => `${name} er krævet`,
 | 
			
		||||
    Styles: {
 | 
			
		||||
      D3Model: "3d-model",
 | 
			
		||||
      AnalogFilm: "analog-film",
 | 
			
		||||
      Anime: "anime",
 | 
			
		||||
      Cinematic: "cinematisk",
 | 
			
		||||
      ComicBook: "tegneserie",
 | 
			
		||||
      DigitalArt: "digital-art",
 | 
			
		||||
      Enhance: "enhance",
 | 
			
		||||
      FantasyArt: "fantasy-art",
 | 
			
		||||
      Isometric: "isometric",
 | 
			
		||||
      LineArt: "line-art",
 | 
			
		||||
      LowPoly: "low-poly",
 | 
			
		||||
      ModelingCompound: "modeling-compound",
 | 
			
		||||
      NeonPunk: "neon-punk",
 | 
			
		||||
      Origami: "origami",
 | 
			
		||||
      Photographic: "fotografisk",
 | 
			
		||||
      PixelArt: "pixel-art",
 | 
			
		||||
      TileTexture: "tile-texture",
 | 
			
		||||
    },
 | 
			
		||||
  },
 | 
			
		||||
  Sd: {
 | 
			
		||||
    SubTitle: (count: number) => `${count} billeder`,
 | 
			
		||||
    Actions: {
 | 
			
		||||
      Params: "Se indstillinger",
 | 
			
		||||
      Copy: "Kopiér prompt",
 | 
			
		||||
      Delete: "Slet",
 | 
			
		||||
      Retry: "Prøv igen",
 | 
			
		||||
      ReturnHome: "Til forsiden",
 | 
			
		||||
      History: "Historik",
 | 
			
		||||
    },
 | 
			
		||||
    EmptyRecord: "Ingen billeder endnu",
 | 
			
		||||
    Status: {
 | 
			
		||||
      Name: "Status",
 | 
			
		||||
      Success: "Ok",
 | 
			
		||||
      Error: "Fejl",
 | 
			
		||||
      Wait: "Venter",
 | 
			
		||||
      Running: "I gang",
 | 
			
		||||
    },
 | 
			
		||||
    Danger: {
 | 
			
		||||
      Delete: "Vil du slette?",
 | 
			
		||||
    },
 | 
			
		||||
    GenerateParams: "Genereringsvalg",
 | 
			
		||||
    Detail: "Detaljer",
 | 
			
		||||
  },
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
export default da;
 | 
			
		||||
@@ -2,6 +2,7 @@ import cn from "./cn";
 import en from "./en";
 import pt from "./pt";
 import tw from "./tw";
+import da from "./da";
 import id from "./id";
 import fr from "./fr";
 import es from "./es";
@@ -30,6 +31,7 @@ const ALL_LANGS = {
   en,
   tw,
   pt,
+  da,
   jp,
   ko,
   id,
@@ -56,6 +58,7 @@ export const ALL_LANG_OPTIONS: Record<Lang, string> = {
   en: "English",
   pt: "Português",
   tw: "繁體中文",
+  da: "Dansk",
   jp: "日本語",
   ko: "한국어",
   id: "Indonesia",
@@ -141,6 +144,7 @@ export const STT_LANG_MAP: Record<Lang, string> = {
   en: "en-US",
   pt: "pt-BR",
   tw: "zh-TW",
+  da: "da-DK",
   jp: "ja-JP",
   ko: "ko-KR",
   id: "id-ID",

20 app/utils.ts
@@ -2,7 +2,11 @@ import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
 import { RequestMessage } from "./client/api";
-import { ServiceProvider } from "./constant";
+import {
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  ServiceProvider,
+} from "./constant";
 // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
 import { fetch as tauriStreamFetch } from "./utils/stream";
 import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
@@ -292,6 +296,20 @@ export function isDalle3(model: string) {
   return "dall-e-3" === model;
 }
 
+export function getTimeoutMSByModel(model: string) {
+  model = model.toLowerCase();
+  if (
+    model.startsWith("dall-e") ||
+    model.startsWith("dalle") ||
+    model.startsWith("o1") ||
+    model.startsWith("o3") ||
+    model.includes("deepseek-r") ||
+    model.includes("-thinking")
+  )
+    return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  return REQUEST_TIMEOUT_MS;
+}
+
 export function getModelSizes(model: string): ModelSize[] {
   if (isDalle3(model)) {
     return ["1024x1024", "1792x1024", "1024x1792"];

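`getTimeoutMSByModel` centralizes the request-timeout choice: DALL·E, o1/o3 and reasoning-style models (anything matching deepseek-r* or *-thinking) get the longer `REQUEST_TIMEOUT_MS_FOR_THINKING`, everything else keeps `REQUEST_TIMEOUT_MS`. The Tencent and XAI client hunks earlier wire it into their abort timers; a minimal sketch of that pattern (the fetch call and option names are assumed from those hunks, not shown here):

// Example (not part of the diff): pairing the helper with an abort timer,
// mirroring how the platform clients above use it.
import { getTimeoutMSByModel } from "@/app/utils";

const controller = new AbortController();
const requestTimeoutId = setTimeout(
  () => controller.abort(),
  getTimeoutMSByModel("deepseek-r1"), // resolves to the longer "thinking" timeout
);

// fetch(chatPath, { signal: controller.signal, ... }) would go here;
// clearTimeout(requestTimeoutId) once the response starts streaming.
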
@@ -3,7 +3,7 @@ import {
   UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
 import {
   EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
   });
 }
 
-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -92,6 +93,23 @@ export async function preProcessImageContent(
   return result;
 }
 
+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}
+
 const imageCaches: Record<string, string> = {};
 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
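
The refactor above extracts `preProcessImageContentBase`, which walks multimodal message content, converts cached image URLs to base64 via `cacheImageToBase64Image`, and leaves the final payload shape to the `transformImageUrl` callback; the two exported wrappers then emit the OpenAI-style `{ type: "image_url", image_url: { url } }` parts and the DashScope-style `{ image: url }` parts. A sketch of the resulting shapes, assuming text parts pass through unchanged (only the image branch is visible in the hunk) and using a made-up message:

// Example (not part of the diff): what the two wrappers produce.
import { MultimodalContent } from "@/app/client/api";
import {
  preProcessImageContent,
  preProcessImageContentForAlibabaDashScope,
} from "@/app/utils/chat";

async function demo() {
  const content: MultimodalContent[] = [
    { type: "text", text: "Describe this image" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ];

  // OpenAI-compatible shape: { type: "image_url", image_url: { url: <base64> } }
  const openaiParts = await preProcessImageContent(content);

  // DashScope multimodal shape: { image: <base64> }
  const dashScopeParts = await preProcessImageContentForAlibabaDashScope(content);

  return { openaiParts, dashScopeParts };
}
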
@@ -400,6 +418,7 @@ export function streamWithThink(
   let responseRes: Response;
   let isInThinkingMode = false;
   let lastIsThinking = false;
+  let lastIsThinkingTagged = false; //between <think> and </think> tags
 
   // animate response to make it looks smooth
   function animateResponseText() {
@@ -579,6 +598,23 @@ export function streamWithThink(
           if (!chunk?.content || chunk.content.length === 0) {
             return;
           }
+
+          // deal with <think> and </think> tags start
+          if (!chunk.isThinking) {
+            if (chunk.content.startsWith("<think>")) {
+              chunk.isThinking = true;
+              chunk.content = chunk.content.slice(7).trim();
+              lastIsThinkingTagged = true;
+            } else if (chunk.content.endsWith("</think>")) {
+              chunk.isThinking = false;
+              chunk.content = chunk.content.slice(0, -8).trim();
+              lastIsThinkingTagged = false;
+            } else if (lastIsThinkingTagged) {
+              chunk.isThinking = true;
+            }
+          }
+          // deal with <think> and </think> tags start
+
           // Check if thinking mode changed
           const isThinkingChanged = lastIsThinking !== chunk.isThinking;
           lastIsThinking = chunk.isThinking;

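The new branch above lets `streamWithThink` cope with providers that mark reasoning with literal `<think>` / `</think>` tags in the streamed content instead of a separate flag: the opening tag flips the chunk into thinking mode and is stripped, the closing tag flips it back, and `lastIsThinkingTagged` keeps the chunks in between classified while the tag block is open. A small standalone sketch of that state machine (the function and variable names here are mine, not the project's):

// Example (not part of the diff): the tag-based classification in isolation.
let insideThinkTags = false; // plays the role of lastIsThinkingTagged

function classifyChunk(chunk: { content: string; isThinking: boolean }) {
  if (!chunk.isThinking) {
    if (chunk.content.startsWith("<think>")) {
      chunk.isThinking = true;
      chunk.content = chunk.content.slice(7).trim(); // drop "<think>"
      insideThinkTags = true;
    } else if (chunk.content.endsWith("</think>")) {
      chunk.isThinking = false;
      chunk.content = chunk.content.slice(0, -8).trim(); // drop "</think>"
      insideThinkTags = false;
    } else if (insideThinkTags) {
      chunk.isThinking = true;
    }
  }
  return chunk;
}

classifyChunk({ content: "<think>step 1", isThinking: false }); // -> isThinking: true
classifyChunk({ content: "step 2", isThinking: false });        // -> isThinking: true
classifyChunk({ content: "done</think>", isThinking: false });  // -> isThinking: false, content "done"
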
@@ -15,6 +15,8 @@ const config: Config = {
   moduleNameMapper: {
     "^@/(.*)$": "<rootDir>/$1",
   },
+  extensionsToTreatAsEsm: [".ts", ".tsx"],
+  injectGlobals: true,
 };
 
 // createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async

@@ -1,24 +1,22 @@
 // Learn more: https://github.com/testing-library/jest-dom
 import "@testing-library/jest-dom";
+import { jest } from "@jest/globals";
 
 global.fetch = jest.fn(() =>
   Promise.resolve({
     ok: true,
     status: 200,
-    json: () => Promise.resolve({}),
+    json: () => Promise.resolve([]),
     headers: new Headers(),
     redirected: false,
     statusText: "OK",
     type: "basic",
     url: "",
-    clone: function () {
-      return this;
-    },
     body: null,
     bodyUsed: false,
     arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
     blob: () => Promise.resolve(new Blob()),
     formData: () => Promise.resolve(new FormData()),
     text: () => Promise.resolve(""),
-  }),
+  } as Response),
 );

@@ -17,8 +17,8 @@
     "prompts": "node ./scripts/fetch-prompts.mjs",
     "prepare": "husky install",
     "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
-    "test": "jest --watch",
-    "test:ci": "jest --ci"
+    "test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
+    "test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
   },
   "dependencies": {
     "@fortaine/fetch-event-source": "^3.0.6",

@@ -1,3 +1,4 @@
+import { jest } from "@jest/globals";
 import { isVisionModel } from "../app/utils";
 
 describe("isVisionModel", () => {
@@ -50,7 +51,7 @@ describe("isVisionModel", () => {
 
   test("should identify models from VISION_MODELS env var", () => {
     process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
-    
+
     expect(isVisionModel("custom-vision-model")).toBe(true);
     expect(isVisionModel("another-vision-model")).toBe(true);
     expect(isVisionModel("unrelated-model")).toBe(false);
@@ -64,4 +65,4 @@ describe("isVisionModel", () => {
     expect(isVisionModel("unrelated-model")).toBe(false);
     expect(isVisionModel("gpt-4-vision")).toBe(true);
   });
-});
+});
