Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-04 16:23:41 +08:00)
Compare commits: 6305-bugth...eb6cfc61da (17 commits)
	
| Author | SHA1 | Date |
|---|---|---|
|  | eb6cfc61da |  |
|  | 11b37c15bd |  |
|  | 1d0038f17d |  |
|  | 619fa519c0 |  |
|  | 48469bd8ca |  |
|  | 5a5e887f2b |  |
|  | b6f5d75656 |  |
|  | 0d41a17ef6 |  |
|  | f7cde17919 |  |
|  | 570cbb34b6 |  |
|  | 7aa9ae0a3e |  |
|  | ad6666eeaf |  |
|  | a2c4e468a0 |  |
|  | a87ec75ba6 |  |
|  | 0a25a1a8cb |  |
|  | b709ee3983 |  |
|  | 6e082ad7ac |  |

README.md | 21
@@ -7,7 +7,7 @@
-<h1 align="center">NextChat (ChatGPT Next Web)</h1>
+<h1 align="center">NextChat</h1>

 English / [简体中文](./README_CN.md)

@@ -22,7 +22,6 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]

-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)

@@ -41,24 +40,6 @@ English / [简体中文](./README_CN.md)
 </div>

-## 👋 Hey, NextChat is going to develop a native app!
-
-> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!
-
-✨ Several key points:
-
-- Starting from 0, you are a veteran
-- Completely open source, not hidden
-- Native development, pursuing the ultimate experience
-
-Will you come and do something together? 😎
-
-https://github.com/ChatGPTNextWeb/NextChat/issues/6269
-
-#Seeking for talents is thirsty #lack of people
-
 ## 🥳 Cheer for DeepSeek, China's AI star!
 > Purpose-Built UI for DeepSeek Reasoner Model
@@ -90,6 +90,14 @@ export async function requestOpenai(req: NextRequest) {
   const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
   console.log("fetchUrl", fetchUrl);

+  let payload = await req.text();
+  if (baseUrl.includes("openrouter.ai")) {
+    const body = JSON.parse(payload);
+    body["include_reasoning"] = true;
+    payload = JSON.stringify(body);
+  }
+
   const fetchOptions: RequestInit = {
     headers: {
       "Content-Type": "application/json",

@@ -100,7 +108,7 @@ export async function requestOpenai(req: NextRequest) {
       }),
     },
     method: req.method,
-    body: req.body,
+    body: payload,
     // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
     redirect: "manual",
     // @ts-ignore

@@ -111,10 +119,7 @@ export async function requestOpenai(req: NextRequest) {
   // #1815 try to refuse gpt4 request
   if (serverConfig.customModels && req.body) {
     try {
-      const clonedBody = await req.text();
-      fetchOptions.body = clonedBody;
-
-      const jsonBody = JSON.parse(clonedBody) as { model?: string };
+      const jsonBody = JSON.parse(payload) as { model?: string };

       // not undefined and is false
       if (
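The pattern behind these hunks: a fetch `Request` body is a one-shot stream, so the proxy now buffers it once with `req.text()` and reuses that string everywhere (the OpenRouter patch, the forwarded body, and the custom-model check), instead of reading the stream in two places. A minimal hedged sketch of the idea; `forwardChatRequest` and `forwardUrl` are hypothetical names, not the project's API:

```ts
import { NextRequest } from "next/server";

// Hedged sketch of the buffer-once pattern, not the repository's exact code.
export async function forwardChatRequest(req: NextRequest, forwardUrl: string) {
  // Read the one-shot body stream exactly once; a second read would throw
  // (the "one-time-use body" TypeError referenced by #2485).
  let payload = await req.text();

  // Provider-specific patch: ask OpenRouter to stream reasoning tokens.
  if (forwardUrl.includes("openrouter.ai")) {
    const body = JSON.parse(payload);
    body["include_reasoning"] = true;
    payload = JSON.stringify(body);
  }

  // The buffered string can be parsed again for checks without consuming anything.
  const { model } = JSON.parse(payload) as { model?: string };
  console.debug("[proxy] forwarding model:", model);

  return fetch(forwardUrl, {
    method: req.method,
    headers: { "Content-Type": "application/json" },
    body: payload,
    redirect: "manual",
  });
}
```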
@@ -40,6 +40,11 @@ export interface MultimodalContent {
   };
 }

+export interface MultimodalContentForAlibaba {
+  text?: string;
+  image?: string;
+}
+
 export interface RequestMessage {
   role: MessageRole;
   content: string | MultimodalContent[];
@@ -7,7 +7,10 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,

@@ -15,12 +18,14 @@ import {
   LLMModel,
   SpeechOptions,
   MultimodalContent,
+  MultimodalContentForAlibaba,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   getTimeoutMSByModel,
+  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));
-
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
       },
     };

+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v)
+      ) as any;
+      messages.push({ role: v.role, content });
+    }
+
     const shouldStream = !!options.config.stream;
     const requestPayload: RequestPayload = {
       model: modelConfig.model,
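The message mapping moves from a synchronous `.map()` (removed above) to a `for...of` loop because image preprocessing is async: for vision models, each message's content is rewritten into DashScope's part format, where a part is `{ image }` or `{ text }` rather than OpenAI's `{ type, image_url }`. A hedged illustration of the two shapes; the sample values are invented:

```ts
// Shapes only; sample values are made up for illustration.
// AlibabaPart mirrors the MultimodalContentForAlibaba interface added above.
type AlibabaPart = { text?: string; image?: string };

// OpenAI-style image part, as produced by preProcessImageContent:
export const openAiPart = {
  type: "image_url",
  image_url: { url: "data:image/png;base64,iVBOR..." },
};

// DashScope-style parts, as produced by preProcessImageContentForAlibabaDashScope:
export const dashScopeParts: AlibabaPart[] = [
  { image: "data:image/png;base64,iVBOR..." },
  { text: "What is in this picture?" },
];
```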
@@ -129,7 +141,7 @@ export class QwenApi implements LLMApi {
         "X-DashScope-SSE": shouldStream ? "enable" : "disable",
       };

-      const chatPath = this.path(Alibaba.ChatPath);
+      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@ export class QwenApi implements LLMApi {
             const json = JSON.parse(text);
             const choices = json.output.choices as Array<{
               message: {
-                content: string | null;
+                content: string | null | MultimodalContentForAlibaba[];
                 tool_calls: ChatMessageTool[];
                 reasoning_content: string | null;
               };
@@ -212,7 +224,9 @@ export class QwenApi implements LLMApi {
             } else if (content && content.length > 0) {
               return {
                 isThinking: false,
-                content: content,
+                content: Array.isArray(content)
+                  ? content.map((item) => item.text).join(",")
+                  : content,
               };
             }
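Since a multimodal reply can now arrive as an array of parts rather than a plain string, the stream parser flattens it to text before display. Roughly, under the same typing as above:

```ts
type AlibabaPart = { text?: string; image?: string };

// Flatten string-or-parts content the way the parser above does;
// array elements without a text field contribute an empty string to the join.
function normalizeContent(content: string | null | AlibabaPart[]): string {
  if (content == null) return "";
  return Array.isArray(content)
    ? content.map((item) => item.text).join(",")
    : content;
}

// normalizeContent([{ text: "a cat" }, { text: "on a mat" }]) === "a cat,on a mat"
```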
@@ -2,10 +2,10 @@
 // azure and openai, using same models. so using same LLMApi.
 import {
   ApiPath,
-  OPENAI_BASE_URL,
-  DEFAULT_MODELS,
-  OpenaiPath,
   Azure,
+  DEFAULT_MODELS,
+  OPENAI_BASE_URL,
+  OpenaiPath,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
 } from "@/app/constant";

@@ -18,13 +18,13 @@ import {
 } from "@/app/store";
 import { collectModelsWithDefaultModel } from "@/app/utils/model";
 import {
-  preProcessImageContent,
-  uploadImage,
   base64Image2Blob,
+  preProcessImageContent,
   streamWithThink,
+  uploadImage,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
-import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
+import { DalleQuality, DalleStyle, ModelSize } from "@/app/typing";

 import {
   ChatOptions,
@@ -39,9 +39,9 @@ import Locale from "../../locales";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
-  isVisionModel,
-  isDalle3 as _isDalle3,
   getTimeoutMSByModel,
+  isDalle3 as _isDalle3,
+  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
@@ -294,6 +294,13 @@ export class ChatGPTApi implements LLMApi {
             useChatStore.getState().currentSession().mask?.plugin || [],
           );
         // console.log("getAsTools", tools, funcs);
+
+        // Add "include_reasoning" for OpenRouter: https://openrouter.ai/announcements/reasoning-tokens-for-thinking-models
+        if (chatPath.includes("openrouter.ai")) {
+          // @ts-ignore
+          requestPayload["include_reasoning"] = true;
+        }
+
         streamWithThink(
           chatPath,
           requestPayload,
@@ -310,6 +317,7 @@ export class ChatGPTApi implements LLMApi {
                 content: string;
                 tool_calls: ChatMessageTool[];
                 reasoning_content: string | null;
+                reasoning: string | null;
               };
             }>;
@@ -335,7 +343,9 @@ export class ChatGPTApi implements LLMApi {
               }
             }

-            const reasoning = choices[0]?.delta?.reasoning_content;
+            const reasoning =
+              choices[0]?.delta?.reasoning_content ||
+              choices[0]?.delta?.reasoning;
             const content = choices[0]?.delta?.content;

             // Skip if both content and reasoning_content are empty or null
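OpenRouter streams reasoning tokens under `delta.reasoning`, while other OpenAI-compatible backends use `delta.reasoning_content`; the fallback above accepts either. A hedged sketch of the extraction in isolation; the `Delta` type is an assumption reconstructed from these hunks, not taken from the codebase:

```ts
// Assumed delta shape, reconstructed from the hunks above.
interface Delta {
  content?: string | null;
  reasoning_content?: string | null; // OpenAI-compatible reasoning field
  reasoning?: string | null; // OpenRouter's field name
}

function extractDelta(choices: Array<{ delta?: Delta }>) {
  const reasoning =
    choices[0]?.delta?.reasoning_content || choices[0]?.delta?.reasoning;
  const content = choices[0]?.delta?.content;
  return { reasoning, content };
}
```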
@@ -411,6 +421,7 @@ export class ChatGPTApi implements LLMApi {
       options.onError?.(e as Error);
     }
   }
+
   async usage() {
     const formatDate = (d: Date) =>
       `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d

@@ -514,4 +525,5 @@ export class ChatGPTApi implements LLMApi {
     }));
   }
 }
+
 export { OpenaiPath };
@@ -221,7 +221,12 @@ export const ByteDance = {

 export const Alibaba = {
   ExampleEndpoint: ALIBABA_BASE_URL,
-  ChatPath: "v1/services/aigc/text-generation/generation",
+  ChatPath: (modelName: string) => {
+    if (modelName.includes("vl") || modelName.includes("omni")) {
+      return "v1/services/aigc/multimodal-generation/generation";
+    }
+    return `v1/services/aigc/text-generation/generation`;
+  },
 };

 export const Tencent = {
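Turning `ChatPath` from a constant string into a function is what lets the Qwen client (see the `this.path(Alibaba.ChatPath(modelConfig.model))` hunk above) route `vl`/`omni` models to DashScope's multimodal endpoint. A minimal standalone check of the same dispatch:

```ts
// Same logic as the ChatPath definition above, restated for a quick check.
const ChatPath = (modelName: string) =>
  modelName.includes("vl") || modelName.includes("omni")
    ? "v1/services/aigc/multimodal-generation/generation"
    : "v1/services/aigc/text-generation/generation";

console.log(ChatPath("qwen-vl-max")); //    multimodal-generation endpoint
console.log(ChatPath("qwen-omni-turbo")); // multimodal-generation endpoint
console.log(ChatPath("qwen-max")); //       text-generation endpoint
```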
@@ -412,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-turbo": "2023-12",
   "gpt-4-turbo-2024-04-09": "2023-12",
   "gpt-4-turbo-preview": "2023-12",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-2025-04-14": "2024-06",
+  "gpt-4.1-mini": "2024-06",
+  "gpt-4.1-mini-2025-04-14": "2024-06",
+  "gpt-4.1-nano": "2024-06",
+  "gpt-4.1-nano-2025-04-14": "2024-06",
+  "gpt-4.5-preview": "2023-10",
+  "gpt-4.5-preview-2025-02-27": "2023-10",
   "gpt-4o": "2023-10",
   "gpt-4o-2024-05-13": "2023-10",
   "gpt-4o-2024-08-06": "2023-10",
@@ -453,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
@@ -480,6 +494,14 @@ const openaiModels = [
   "gpt-4-32k-0613",
   "gpt-4-turbo",
   "gpt-4-turbo-preview",
+  "gpt-4.1",
+  "gpt-4.1-2025-04-14",
+  "gpt-4.1-mini",
+  "gpt-4.1-mini-2025-04-14",
+  "gpt-4.1-nano",
+  "gpt-4.1-nano-2025-04-14",
+  "gpt-4.5-preview",
+  "gpt-4.5-preview-2025-02-27",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
@@ -570,6 +592,9 @@ const alibabaModes = [
   "qwen-max-0403",
   "qwen-max-0107",
   "qwen-max-longcontext",
+  "qwen-omni-turbo",
+  "qwen-vl-plus",
+  "qwen-vl-max",
 ];

 const tencentModels = [
@@ -1,9 +1,9 @@
 import {
   CACHE_URL_PREFIX,
-  UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
+  UPLOAD_URL,
 } from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
 import {
   EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
   });
 }

-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;

@@ -81,7 +82,7 @@ export async function preProcessImageContent(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -92,7 +93,25 @@
   return result;
 }

+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}
+
 const imageCaches: Record<string, string> = {};

 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
     if (!imageCaches[imageUrl]) {
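The refactor is a strategy-callback extraction: `preProcessImageContentBase` keeps the shared walk (pass strings through, cache image URLs to base64) and delegates only the provider-specific part shape to `transformImageUrl`. A stripped-down sketch of the pattern; the types and the non-image branch are simplified assumptions, not the file's exact code:

```ts
type Part = { type?: string; text?: string; image_url?: { url: string } };

// Shared walker: strings pass through untouched; each image part is
// rewritten into whatever shape the provider callback returns.
async function preProcessBase(
  content: string | Part[],
  transformImageUrl: (url: string) => Promise<Record<string, any>>,
): Promise<string | Record<string, any>[]> {
  if (typeof content === "string") return content;
  const result: Record<string, any>[] = [];
  for (const part of content) {
    if (part?.type === "image_url" && part?.image_url?.url) {
      result.push(await transformImageUrl(part.image_url.url));
    } else {
      result.push(part); // assumed: non-image parts are kept as-is
    }
  }
  return result;
}

// OpenAI keeps { type, image_url }; DashScope wants a bare { image }.
export const forOpenAI = (c: string | Part[]) =>
  preProcessBase(c, async (url) => ({ type: "image_url", image_url: { url } }));
export const forDashScope = (c: string | Part[]) =>
  preProcessBase(c, async (url) => ({ image: url }));
```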
@@ -367,6 +386,7 @@ export function stream(
       openWhenHidden: true,
     });
   }
+
   console.debug("[ChatAPI] start");
   chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
 }
@@ -609,16 +629,9 @@ export function streamWithThink(
               if (remainText.length > 0) {
                 remainText += "\n";
               }
-              remainText += "> " + chunk.content;
-            } else {
-              // Handle newlines in thinking content
-              if (chunk.content.includes("\n\n")) {
-                const lines = chunk.content.split("\n\n");
-                remainText += lines.join("\n\n> ");
-              } else {
-                remainText += chunk.content;
-              }
-            }
+              remainText += "> ";
+            }
+            remainText += chunk.content.replaceAll("\n", "\n> ");
           } else {
             // If in normal mode
             if (isInThinkingMode || isThinkingChanged) {
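The rewrite collapses the branchy newline handling into one rule: open a blockquote with `> ` when a thinking block starts, then turn every newline inside the chunk into a `\n> ` continuation, so multi-line reasoning renders as a single Markdown quote. A small hedged distillation:

```ts
// Distilled from the hunk above; the isNewBlock flag stands in for the
// surrounding isInThinkingMode/isThinkingChanged bookkeeping.
function appendThinkingChunk(
  remainText: string,
  chunk: string,
  isNewBlock: boolean,
): string {
  if (isNewBlock) {
    if (remainText.length > 0) remainText += "\n";
    remainText += "> "; // open the blockquote
  }
  // Every inner newline gets a "> " continuation marker.
  return remainText + chunk.replaceAll("\n", "\n> ");
}

// appendThinkingChunk("", "first line\nsecond line", true)
//   === "> first line\n> second line"
```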
@@ -644,6 +657,7 @@ export function streamWithThink(
       openWhenHidden: true,
     });
   }
+
   console.debug("[ChatAPI] start");
   chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
 }
@@ -15,6 +15,8 @@ const config: Config = {
   moduleNameMapper: {
     "^@/(.*)$": "<rootDir>/$1",
   },
+  extensionsToTreatAsEsm: [".ts", ".tsx"],
+  injectGlobals: true,
 };

 // createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async
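`extensionsToTreatAsEsm` makes Jest compile the TypeScript sources as ES modules (which is why the `test` scripts below gain `--experimental-vm-modules`), and `injectGlobals: true` keeps `describe`/`test`/`expect` available without imports. A hedged sketch of how the whole file might read; the `nextJest` import path and wiring are assumptions based on the file's own comment:

```ts
// jest.config.ts: minimal sketch; the nextJest wiring is an assumption
// inferred from the comment in the hunk above.
import type { Config } from "jest";
import nextJest from "next/jest.js";

const createJestConfig = nextJest({ dir: "./" });

const config: Config = {
  moduleNameMapper: {
    "^@/(.*)$": "<rootDir>/$1",
  },
  extensionsToTreatAsEsm: [".ts", ".tsx"], // run .ts/.tsx as ESM
  injectGlobals: true, // keep describe/test/expect as globals
};

export default createJestConfig(config);
```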
@@ -1,24 +1,22 @@
 // Learn more: https://github.com/testing-library/jest-dom
 import "@testing-library/jest-dom";
+import { jest } from "@jest/globals";

 global.fetch = jest.fn(() =>
   Promise.resolve({
     ok: true,
     status: 200,
-    json: () => Promise.resolve({}),
+    json: () => Promise.resolve([]),
     headers: new Headers(),
     redirected: false,
     statusText: "OK",
     type: "basic",
     url: "",
-    clone: function () {
-      return this;
-    },
     body: null,
     bodyUsed: false,
     arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
     blob: () => Promise.resolve(new Blob()),
     formData: () => Promise.resolve(new FormData()),
     text: () => Promise.resolve(""),
-  }),
+  } as Response),
 );
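The explicit `import { jest } from "@jest/globals"` is needed because the `jest` object is not injected automatically under the ESM test runner, and the `as Response` cast keeps the hand-rolled mock assignable to `fetch`'s return type. A hypothetical test that leans on this setup:

```ts
// Hypothetical test relying on the global fetch mock from jest.setup.ts.
import { jest } from "@jest/globals";

afterEach(() => jest.clearAllMocks());

test("fetch resolves with the mocked empty-array body", async () => {
  const res = await fetch("https://example.invalid/api");
  expect(res.ok).toBe(true);
  expect(res.status).toBe(200);
  await expect(res.json()).resolves.toEqual([]);
  expect(fetch).toHaveBeenCalledTimes(1);
});
```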
@@ -17,8 +17,8 @@
     "prompts": "node ./scripts/fetch-prompts.mjs",
     "prepare": "husky install",
     "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
-    "test": "jest --watch",
-    "test:ci": "jest --ci"
+    "test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
+    "test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
   },
   "dependencies": {
     "@fortaine/fetch-event-source": "^3.0.6",
@@ -1,3 +1,4 @@
+import { jest } from "@jest/globals";
 import { isVisionModel } from "../app/utils";

 describe("isVisionModel", () => {

@@ -50,7 +51,7 @@ describe("isVisionModel", () => {
   test("should identify models from VISION_MODELS env var", () => {
     process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
-    
+
     expect(isVisionModel("custom-vision-model")).toBe(true);
     expect(isVisionModel("another-vision-model")).toBe(true);
     expect(isVisionModel("unrelated-model")).toBe(false);

@@ -64,4 +65,4 @@ describe("isVisionModel", () => {
     expect(isVisionModel("unrelated-model")).toBe(false);
     expect(isVisionModel("gpt-4-vision")).toBe(true);
   });
 });
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user