Compare commits


1 commit

8 changed files with 313 additions and 80 deletions

File: README.md

@@ -22,7 +22,7 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[saas-url]: https://nextchat.club?utm_source=readme
@@ -40,14 +40,13 @@ English / [简体中文](./README_CN.md)
</div>
## 🥳 Cheer for NextChat iOS Version Online!
> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
## 🫣 NextChat Support MCP !
> Before build, please set env ENABLE_MCP=true
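For context, a build-time flag like `ENABLE_MCP` is normally consumed from `process.env` on the server; a minimal sketch, assuming the variable is read at build/server time (the helper name is illustrative, not NextChat's actual API):

```ts
// Minimal sketch, assuming ENABLE_MCP is read on the server at build time.
// The helper name is hypothetical; NextChat's real check lives in its MCP actions.
export function mcpEnabled(): boolean {
  return process.env.ENABLE_MCP === "true";
}
```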

File: app/api/common.ts

@@ -90,14 +90,6 @@ export async function requestOpenai(req: NextRequest) {
const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
console.log("fetchUrl", fetchUrl);
let payload = await req.text();
if (baseUrl.includes("openrouter.ai")) {
const body = JSON.parse(payload);
body["include_reasoning"] = true;
payload = JSON.stringify(body);
}
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
@@ -108,7 +100,7 @@ export async function requestOpenai(req: NextRequest) {
}),
},
method: req.method,
body: payload,
body: req.body,
// to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
redirect: "manual",
// @ts-ignore
@@ -119,7 +111,10 @@ export async function requestOpenai(req: NextRequest) {
// #1815 try to refuse gpt4 request
if (serverConfig.customModels && req.body) {
try {
const jsonBody = JSON.parse(payload) as { model?: string };
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
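The body-handling hunks trade an eagerly buffered payload (needed on one side to inject `include_reasoning` for OpenRouter requests) for a streamed `req.body` that is only materialized with `req.text()` when the custom-models check must parse the JSON. A minimal sketch of the streaming variant, assuming a Next.js route handler; the function name and `needsInspection` flag are illustrative, and the `duplex` option is an assumption about what the hunk's `// @ts-ignore` guards:

```ts
import { NextRequest } from "next/server";

// A fetch body stream can be consumed only once, so keep the stream by
// default and swap in the buffered text only when inspection is required.
async function proxyRequest(
  req: NextRequest,
  fetchUrl: string,
  needsInspection: boolean,
) {
  const fetchOptions: RequestInit = {
    method: req.method,
    headers: { "Content-Type": "application/json" },
    body: req.body, // pass-through; nothing is buffered
    redirect: "manual",
    // @ts-ignore -- Node's fetch wants `duplex` for streamed request bodies (assumption)
    duplex: "half",
  };

  if (needsInspection) {
    // req.text() drains the stream, so the buffered string must replace it.
    const clonedBody = await req.text();
    fetchOptions.body = clonedBody;
    const jsonBody = JSON.parse(clonedBody) as { model?: string };
    console.log("[Proxy] model:", jsonBody.model);
  }

  return fetch(fetchUrl, fetchOptions);
}
```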

File: app/client/platforms/openai.ts

@@ -2,10 +2,10 @@
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
Azure,
DEFAULT_MODELS,
OPENAI_BASE_URL,
DEFAULT_MODELS,
OpenaiPath,
Azure,
REQUEST_TIMEOUT_MS,
ServiceProvider,
} from "@/app/constant";
@@ -18,13 +18,13 @@ import {
} from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import {
base64Image2Blob,
preProcessImageContent,
streamWithThink,
uploadImage,
base64Image2Blob,
streamWithThink,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { DalleQuality, DalleStyle, ModelSize } from "@/app/typing";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
import {
ChatOptions,
@@ -39,9 +39,9 @@ import Locale from "../../locales";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getTimeoutMSByModel,
isDalle3 as _isDalle3,
isVisionModel,
isDalle3 as _isDalle3,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
@@ -198,8 +198,7 @@ export class ChatGPTApi implements LLMApi {
const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3") ||
options.config.model.startsWith("o4-mini");
options.config.model.startsWith("o3");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -244,7 +243,7 @@ export class ChatGPTApi implements LLMApi {
}
// add max_tokens to vision model
if (visionModel && !isO1OrO3) {
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
@@ -295,13 +294,6 @@ export class ChatGPTApi implements LLMApi {
useChatStore.getState().currentSession().mask?.plugin || [],
);
// console.log("getAsTools", tools, funcs);
// Add "include_reasoning" for OpenRouter: https://openrouter.ai/announcements/reasoning-tokens-for-thinking-models
if (chatPath.includes("openrouter.ai")) {
// @ts-ignore
requestPayload["include_reasoning"] = true;
}
streamWithThink(
chatPath,
requestPayload,
@@ -318,7 +310,6 @@ export class ChatGPTApi implements LLMApi {
content: string;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
reasoning: string | null;
};
}>;
@@ -344,9 +335,7 @@ export class ChatGPTApi implements LLMApi {
}
}
const reasoning =
choices[0]?.delta?.reasoning_content ||
choices[0]?.delta?.reasoning;
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
@@ -422,7 +411,6 @@ export class ChatGPTApi implements LLMApi {
options.onError?.(e as Error);
}
}
async usage() {
const formatDate = (d: Date) =>
`${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
@@ -526,5 +514,4 @@ export class ChatGPTApi implements LLMApi {
}));
}
}
export { OpenaiPath };
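Most hunks in this file are import reorderings; the substantive changes drop the o3/o4-mini detection, the OpenRouter `include_reasoning` injection, and the `reasoning` fallback when reading stream deltas. After the change, the delta routing reduces to roughly this sketch (types abridged, helper name illustrative):

```ts
interface StreamDelta {
  content?: string | null;
  reasoning_content?: string | null;
}

// Route a streamed delta to the "thinking" pane or the normal content pane.
function extractChunk(delta: StreamDelta): {
  isThinking: boolean;
  content: string;
} {
  const reasoning = delta.reasoning_content; // no more `delta.reasoning` fallback
  if (reasoning && reasoning.length > 0) {
    return { isThinking: true, content: reasoning };
  }
  return { isThinking: false, content: delta.content ?? "" };
}
```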

File: app/components/chat.tsx

@@ -18,7 +18,6 @@ import ReturnIcon from "../icons/return.svg";
import CopyIcon from "../icons/copy.svg";
import SpeakIcon from "../icons/speak.svg";
import SpeakStopIcon from "../icons/speak-stop.svg";
import LoadingIcon from "../icons/three-dots.svg";
import LoadingButtonIcon from "../icons/loading.svg";
import PromptIcon from "../icons/prompt.svg";
import MaskIcon from "../icons/mask.svg";
@@ -79,8 +78,6 @@ import {
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
@@ -125,14 +122,15 @@ import { getModelProvider } from "../utils/model";
import { RealtimeChat } from "@/app/components/realtime-chat";
import clsx from "clsx";
import { getAvailableClientsCount, isMcpEnabled } from "../mcp/actions";
import { Markdown } from "./markdown";
const localStorage = safeLocalStorage();
const ttsPlayer = createTTSPlayer();
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />,
});
// const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
// loading: () => <LoadingIcon />,
// });
const MCPAction = () => {
const navigate = useNavigate();
@@ -1984,6 +1982,8 @@ function _Chat() {
fontFamily={fontFamily}
parentRef={scrollRef}
defaultShow={i >= messages.length - 6}
immediatelyRender={i >= messages.length - 3}
streaming={message.streaming}
/>
{getMessageImages(message).length == 1 && (
<img
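Here the `Markdown` component switches from a `next/dynamic` lazy import to a static one (the dynamic version is kept as a comment), and the message list threads two new props through. A reduced sketch of the call site, with the surrounding component abridged:

```tsx
import { Markdown } from "./markdown";

// Sketch: the newest messages render immediately; a message that is still
// streaming takes the dedicated streaming path.
function MessageBody(props: {
  text: string;
  index: number;
  total: number;
  streaming?: boolean;
}) {
  return (
    <Markdown
      content={props.text}
      defaultShow={props.index >= props.total - 6}
      immediatelyRender={props.index >= props.total - 3}
      streaming={props.streaming}
    />
  );
}
```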

File: app/components/markdown.tsx

@@ -267,6 +267,136 @@ function tryWrapHtmlCode(text: string) {
);
}
// Split content into paragraphs while preserving code blocks
function splitContentIntoParagraphs(content: string) {
  // Check for unclosed code blocks
  const codeBlockStartCount = (content.match(/```/g) || []).length;
  let processedContent = content;

  // Add closing tag if there's an odd number of code block markers
  if (codeBlockStartCount % 2 !== 0) {
    processedContent = content + "\n```";
  }

  // Extract code blocks
  const codeBlockRegex = /```[\s\S]*?```/g;
  const codeBlocks: string[] = [];
  let codeBlockCounter = 0;

  // Replace code blocks with placeholders
  const contentWithPlaceholders = processedContent.replace(
    codeBlockRegex,
    (match) => {
      codeBlocks.push(match);
      const placeholder = `__CODE_BLOCK_${codeBlockCounter++}__`;
      return placeholder;
    },
  );

  // Split by double newlines
  const paragraphs = contentWithPlaceholders
    .split(/\n\n+/)
    .filter((p) => p.trim());

  // Restore code blocks
  return paragraphs.map((p) => {
    if (p.match(/__CODE_BLOCK_\d+__/)) {
      return p.replace(/__CODE_BLOCK_\d+__/g, (match) => {
        const index = parseInt(match.match(/\d+/)?.[0] || "0");
        return codeBlocks[index] || match;
      });
    }
    return p;
  });
}
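A quick illustration of the round trip: because fenced blocks are swapped out for placeholders before the split on blank lines, blank lines inside code do not create paragraph breaks, and an unterminated fence from a still-streaming chunk is closed defensively first.

```ts
const chunks = splitContentIntoParagraphs(
  "Intro paragraph.\n\n```ts\nconst x = 1;\n\nconst y = 2;\n```\n\nOutro.",
);
// -> ["Intro paragraph.", "```ts\nconst x = 1;\n\nconst y = 2;\n```", "Outro."]
```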
// Lazy-loaded paragraph component
function MarkdownParagraph({
  content,
  onLoad,
}: {
  content: string;
  onLoad?: () => void;
}) {
  const [isLoaded, setIsLoaded] = useState(false);
  const placeholderRef = useRef<HTMLDivElement>(null);
  const [isVisible, setIsVisible] = useState(false);

  useEffect(() => {
    let observer: IntersectionObserver;
    if (placeholderRef.current) {
      observer = new IntersectionObserver(
        (entries) => {
          if (entries[0].isIntersecting) {
            setIsVisible(true);
          }
        },
        { threshold: 0.1, rootMargin: "200px 0px" },
      );
      observer.observe(placeholderRef.current);
    }
    return () => observer?.disconnect();
  }, []);

  useEffect(() => {
    if (isVisible && !isLoaded) {
      setIsLoaded(true);
      onLoad?.();
    }
  }, [isVisible, isLoaded, onLoad]);

  // Generate preview content
  const previewContent = useMemo(() => {
    if (content.startsWith("```")) {
      return "```" + (content.split("\n")[0] || "").slice(3) + "...```";
    }
    return content.length > 60 ? content.slice(0, 60) + "..." : content;
  }, [content]);

  return (
    <div className="markdown-paragraph" ref={placeholderRef}>
      {!isLoaded ? (
        <div className="markdown-paragraph-placeholder">{previewContent}</div>
      ) : (
        <_MarkDownContent content={content} />
      )}
    </div>
  );
}
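The placeholder preview keeps a collapsed paragraph recognizable: fenced code collapses to its info string, prose truncates at 60 characters. Extracted as a standalone helper for illustration:

```ts
function previewOf(content: string): string {
  if (content.startsWith("```")) {
    // "```python\nprint('hi')\n```" -> "```python...```"
    return "```" + (content.split("\n")[0] || "").slice(3) + "...```";
  }
  return content.length > 60 ? content.slice(0, 60) + "..." : content;
}
```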
// Memoized paragraph component to prevent unnecessary re-renders
const MemoizedMarkdownParagraph = React.memo(
  ({ content }: { content: string }) => {
    return <_MarkDownContent content={content} />;
  },
  (prevProps, nextProps) => prevProps.content === nextProps.content,
);
MemoizedMarkdownParagraph.displayName = "MemoizedMarkdownParagraph";

// Specialized component for streaming content
function StreamingMarkdownContent({ content }: { content: string }) {
  const paragraphs = useMemo(
    () => splitContentIntoParagraphs(content),
    [content],
  );
  const lastParagraphRef = useRef<HTMLDivElement>(null);

  return (
    <div className="markdown-streaming-content">
      {paragraphs.map((paragraph, index) => (
        <div
          key={`p-${index}-${paragraph.substring(0, 20)}`}
          className="markdown-paragraph markdown-streaming-paragraph"
          ref={index === paragraphs.length - 1 ? lastParagraphRef : null}
        >
          <MemoizedMarkdownParagraph content={paragraph} />
        </div>
      ))}
    </div>
  );
}

function _MarkDownContent(props: { content: string }) {
  const escapedContent = useMemo(() => {
    return tryWrapHtmlCode(escapeBrackets(props.content));
@@ -326,9 +456,27 @@ export function Markdown(
  fontFamily?: string;
  parentRef?: RefObject<HTMLDivElement>;
  defaultShow?: boolean;
  immediatelyRender?: boolean;
  streaming?: boolean; // Whether this is a streaming response
} & React.DOMAttributes<HTMLDivElement>,
) {
  const mdRef = useRef<HTMLDivElement>(null);
  const paragraphs = useMemo(
    () => splitContentIntoParagraphs(props.content),
    [props.content],
  );
  const [loadedCount, setLoadedCount] = useState(0);

  // Determine rendering strategy based on props
  const shouldAsyncRender =
    !props.immediatelyRender && !props.streaming && paragraphs.length > 1;

  useEffect(() => {
    // Immediately render all paragraphs if specified
    if (props.immediatelyRender) {
      setLoadedCount(paragraphs.length);
    }
  }, [props.immediatelyRender, paragraphs.length]);

  return (
    <div
@@ -344,6 +492,24 @@ export function Markdown(
    >
      {props.loading ? (
        <LoadingIcon />
      ) : props.streaming ? (
        // Use specialized component for streaming content
        <StreamingMarkdownContent content={props.content} />
      ) : shouldAsyncRender ? (
        <div className="markdown-content">
          {paragraphs.map((paragraph, index) => (
            <MarkdownParagraph
              key={index}
              content={paragraph}
              onLoad={() => setLoadedCount((prev) => prev + 1)}
            />
          ))}
          {loadedCount < paragraphs.length && loadedCount > 0 && (
            <div className="markdown-paragraph-loading">
              <LoadingIcon />
            </div>
          )}
        </div>
      ) : (
        <MarkdownContent content={props.content} />
      )}
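Putting the new props together, the component now chooses between three bodies, plus the loading spinner handled separately. Reduced to a pure function, the selection logic in this hunk is:

```ts
type RenderMode = "streaming" | "lazy" | "plain";

function pickRenderMode(opts: {
  streaming?: boolean;
  immediatelyRender?: boolean;
  paragraphCount: number;
}): RenderMode {
  if (opts.streaming) return "streaming"; // per-paragraph memoized rendering
  if (!opts.immediatelyRender && opts.paragraphCount > 1) return "lazy"; // IntersectionObserver path
  return "plain"; // single-pass markdown render
}
```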

File: app/constant.ts

@@ -417,14 +417,6 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4-turbo": "2023-12",
"gpt-4-turbo-2024-04-09": "2023-12",
"gpt-4-turbo-preview": "2023-12",
"gpt-4.1": "2024-06",
"gpt-4.1-2025-04-14": "2024-06",
"gpt-4.1-mini": "2024-06",
"gpt-4.1-mini-2025-04-14": "2024-06",
"gpt-4.1-nano": "2024-06",
"gpt-4.1-nano-2025-04-14": "2024-06",
"gpt-4.5-preview": "2023-10",
"gpt-4.5-preview-2025-02-27": "2023-10",
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
@@ -466,7 +458,6 @@ export const DEFAULT_TTS_VOICES = [
export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/gpt-4\.1/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
@@ -478,8 +469,6 @@ export const VISION_MODEL_REGEXES = [
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
/vl/i,
/o3/,
/o4-mini/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -496,14 +485,6 @@ const openaiModels = [
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
@@ -518,8 +499,6 @@ const openaiModels = [
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
];
const googleModels = [
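These hunks drop the gpt-4.1 and gpt-4.5 model IDs (and their knowledge-cutoff entries), the o3/o4-mini entries, and the matching vision regexes. A spot-check of the regex effect (standalone sketch with an abridged list; the app's real `isVisionModel` also applies `EXCLUDE_VISION_MODEL_REGEXES`):

```ts
const visionRegexes = [/vision/, /gpt-4o/, /claude-3/, /vl/i, /^dall-e-3$/]; // abridged
const isVision = (model: string) => visionRegexes.some((r) => r.test(model));

isVision("gpt-4o-mini"); // true  -- still matched by /gpt-4o/
isVision("o4-mini");     // false -- /o4-mini/ was removed in this diff
```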

File: app/styles/markdown.scss

@@ -99,6 +99,7 @@
font-size: 14px;
line-height: 1.5;
word-wrap: break-word;
margin-bottom: 0;
}
.light {
@@ -358,8 +359,14 @@
.markdown-body kbd {
display: inline-block;
padding: 3px 5px;
font: 11px ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font:
11px ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
line-height: 10px;
color: var(--color-fg-default);
vertical-align: middle;
@@ -448,16 +455,28 @@
.markdown-body tt,
.markdown-body code,
.markdown-body samp {
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-size: 12px;
}
.markdown-body pre {
margin-top: 0;
margin-bottom: 0;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-size: 12px;
word-wrap: normal;
}
@@ -1130,3 +1149,87 @@
#dmermaid {
display: none;
}
.markdown-content {
  width: 100%;
}

.markdown-paragraph {
  transition: opacity 0.3s ease;
  margin-bottom: 0.5em;

  &.markdown-paragraph-visible {
    opacity: 1;
  }

  &.markdown-paragraph-hidden {
    opacity: 0.7;
  }
}

.markdown-paragraph-placeholder {
  padding: 8px;
  color: var(--color-fg-subtle);
  background-color: var(--color-canvas-subtle);
  border-radius: 6px;
  border-left: 3px solid var(--color-border-muted);
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
  font-family: var(--font-family-sans);
  font-size: 14px;
  min-height: 1.2em;
}

.markdown-paragraph-loading {
  height: 20px;
  background-color: var(--color-canvas-subtle);
  border-radius: 6px;
  margin-bottom: 8px;
  position: relative;
  overflow: hidden;

  &::after {
    content: "";
    position: absolute;
    top: 0;
    left: 0;
    width: 30%;
    height: 100%;
    background: linear-gradient(
      90deg,
      transparent,
      rgba(255, 255, 255, 0.1),
      transparent
    );
    animation: shimmer 1.5s infinite;
  }
}

@keyframes shimmer {
  0% {
    transform: translateX(-100%);
  }
  100% {
    transform: translateX(200%);
  }
}

.markdown-streaming-content {
  width: 100%;
}

.markdown-streaming-paragraph {
  opacity: 1;
  animation: fadeIn 0.3s ease-in-out;
  margin-bottom: 0.5em;
}

@keyframes fadeIn {
  from {
    opacity: 0.5;
  }
  to {
    opacity: 1;
  }
}

File: app/utils/chat.ts

@@ -1,7 +1,7 @@
import {
CACHE_URL_PREFIX,
REQUEST_TIMEOUT_MS,
UPLOAD_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { MultimodalContent, RequestMessage } from "@/app/client/api";
import Locale from "@/app/locales";
@@ -111,7 +111,6 @@ export async function preProcessImageContentForAlibabaDashScope(
}
const imageCaches: Record<string, string> = {};
export function cacheImageToBase64Image(imageUrl: string) {
if (imageUrl.includes(CACHE_URL_PREFIX)) {
if (!imageCaches[imageUrl]) {
@@ -386,7 +385,6 @@ export function stream(
openWhenHidden: true,
});
}
console.debug("[ChatAPI] start");
chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
}
@@ -629,9 +627,16 @@ export function streamWithThink(
if (remainText.length > 0) {
remainText += "\n";
}
remainText += "> ";
remainText += "> " + chunk.content;
} else {
// Handle newlines in thinking content
if (chunk.content.includes("\n\n")) {
const lines = chunk.content.split("\n\n");
remainText += lines.join("\n\n> ");
} else {
remainText += chunk.content;
}
}
remainText += chunk.content.replaceAll("\n", "\n> ");
} else {
// If in normal mode
if (isInThinkingMode || isThinkingChanged) {
@@ -657,7 +662,6 @@ export function streamWithThink(
openWhenHidden: true,
});
}
console.debug("[ChatAPI] start");
chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
}