Compare commits

..

11 Commits

Author SHA1 Message Date
Shay Molcho
aa34e43a5e Merge b05b2e78cd into 3809375694 2025-04-20 19:41:20 +02:00
RiverRay
3809375694 Merge pull request #6457 from ACTOR-ALCHEMIST/main
Support OpenAI o3 and o4-mini
2025-04-19 16:00:41 +08:00
RiverRay
1b0de25986 Update README.md 2025-04-19 15:59:31 +08:00
RiverRay
865c45dd29 Update README.md 2025-04-19 15:56:53 +08:00
RiverRay
1f5d8e6d9c Merge pull request #6458 from ChatGPTNextWeb/Leizhenpeng-patch-7
Update README.md
2025-04-19 15:50:48 +08:00
RiverRay
c9ef6d58ed Update README.md 2025-04-19 15:50:17 +08:00
Jasper Hu
2d7229d2b8 feat: support the new OpenAI models o3 and o4-mini, and adapt to their new parameters 2025-04-18 20:36:07 +01:00
RiverRay
11b37c15bd Merge pull request #6450 from stephen-zeng/main
Add gpt-4.1 family & gpt-4.5-preview support
2025-04-17 08:29:19 +08:00
QwQwQ
1d0038f17d add gpt-4.5-preview support 2025-04-16 22:10:47 +08:00
QwQwQ
619fa519c0 add gpt-4.1 family support 2025-04-16 22:02:35 +08:00
Shay Molcho
b05b2e78cd Added missing periods (.) in multiple places
This commit adds missing periods (.) in several places to ensure consistency and improve readability in the code and documentation.
2025-01-22 20:23:51 +02:00
6 changed files with 47 additions and 293 deletions

View File

@@ -22,7 +22,7 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
[saas-url]: https://nextchat.club?utm_source=readme
@@ -40,13 +40,14 @@ English / [简体中文](./README_CN.md)
</div>
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
## 🥳 Cheer for NextChat iOS Version Online!
> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)
<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
## 🫣 NextChat Support MCP !
> Before build, please set env ENABLE_MCP=true
@@ -111,7 +112,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Now supports Artifacts & SD
- 🚀 v2.14.0 Now supports Artifacts & SD.
- 🚀 v2.10.1 support Google Gemini Pro model.
- 🚀 v2.9.11 you can use azure endpoint now.
- 🚀 v2.8 now we have a client that runs across all platforms!
@@ -319,7 +320,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
### `DEFAULT_MODEL` (optional)
Change default model
Change default model.
### `VISION_MODELS` (optional)
@@ -350,7 +351,7 @@ Customize Stability API url.
### `ENABLE_MCP` (optional)
Enable MCP (Model Context Protocol) Feature
Enable MCP (Model Context Protocol) Feature.
### `SILICONFLOW_API_KEY` (optional)
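
For context on the `ENABLE_MCP` switch documented just above: it is an environment flag that must be set before building. A minimal, illustrative sketch of reading such a flag on the server side (the helper name is an assumption, not code from this diff):

```ts
// Illustrative only (not part of this diff): treat MCP as enabled
// when the ENABLE_MCP environment variable is explicitly "true".
export function mcpEnabledFromEnv(
  env: NodeJS.ProcessEnv = process.env,
): boolean {
  return env.ENABLE_MCP === "true";
}
```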

View File

@@ -198,7 +198,8 @@ export class ChatGPTApi implements LLMApi {
const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3");
options.config.model.startsWith("o3") ||
options.config.model.startsWith("o4-mini");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -243,7 +244,7 @@ export class ChatGPTApi implements LLMApi {
}
// add max_tokens to vision model
if (visionModel) {
if (visionModel && !isO1OrO3) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
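
Read together, the two hunks above extend the reasoning-model check to `o4-mini` and stop forcing `max_tokens` for those models. A condensed sketch of the resulting behavior (the helper names and the trimmed-down config type are illustrative, not the repo's actual code):

```ts
// Condensed, illustrative sketch of the post-change logic in the request builder.
interface SketchModelConfig {
  model: string;
  max_tokens: number;
}

// o1, o3, and (newly) o4-mini models share the same special handling.
function isReasoningModel(model: string): boolean {
  return (
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.startsWith("o4-mini")
  );
}

// Vision models still get a max_tokens floor, but reasoning models are
// skipped, matching the `visionModel && !isO1OrO3` guard shown above.
function applyVisionMaxTokens(
  payload: Record<string, unknown>,
  cfg: SketchModelConfig,
  isVisionModel: boolean,
): void {
  if (isVisionModel && !isReasoningModel(cfg.model)) {
    payload["max_tokens"] = Math.max(cfg.max_tokens, 4000);
  }
}
```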

View File

@@ -18,6 +18,7 @@ import ReturnIcon from "../icons/return.svg";
import CopyIcon from "../icons/copy.svg";
import SpeakIcon from "../icons/speak.svg";
import SpeakStopIcon from "../icons/speak-stop.svg";
import LoadingIcon from "../icons/three-dots.svg";
import LoadingButtonIcon from "../icons/loading.svg";
import PromptIcon from "../icons/prompt.svg";
import MaskIcon from "../icons/mask.svg";
@@ -78,6 +79,8 @@ import {
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
@@ -122,15 +125,14 @@ import { getModelProvider } from "../utils/model";
import { RealtimeChat } from "@/app/components/realtime-chat";
import clsx from "clsx";
import { getAvailableClientsCount, isMcpEnabled } from "../mcp/actions";
import { Markdown } from "./markdown";
const localStorage = safeLocalStorage();
const ttsPlayer = createTTSPlayer();
// const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
// loading: () => <LoadingIcon />,
// });
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />,
});
const MCPAction = () => {
const navigate = useNavigate();
@@ -1982,8 +1984,6 @@ function _Chat() {
fontFamily={fontFamily}
parentRef={scrollRef}
defaultShow={i >= messages.length - 6}
immediatelyRender={i >= messages.length - 3}
streaming={message.streaming}
/>
{getMessageImages(message).length == 1 && (
<img

View File

@@ -267,136 +267,6 @@ function tryWrapHtmlCode(text: string) {
);
}
// Split content into paragraphs while preserving code blocks
function splitContentIntoParagraphs(content: string) {
// Check for unclosed code blocks
const codeBlockStartCount = (content.match(/```/g) || []).length;
let processedContent = content;
// Add closing tag if there's an odd number of code block markers
if (codeBlockStartCount % 2 !== 0) {
processedContent = content + "\n```";
}
// Extract code blocks
const codeBlockRegex = /```[\s\S]*?```/g;
const codeBlocks: string[] = [];
let codeBlockCounter = 0;
// Replace code blocks with placeholders
const contentWithPlaceholders = processedContent.replace(
codeBlockRegex,
(match) => {
codeBlocks.push(match);
const placeholder = `__CODE_BLOCK_${codeBlockCounter++}__`;
return placeholder;
},
);
// Split by double newlines
const paragraphs = contentWithPlaceholders
.split(/\n\n+/)
.filter((p) => p.trim());
// Restore code blocks
return paragraphs.map((p) => {
if (p.match(/__CODE_BLOCK_\d+__/)) {
return p.replace(/__CODE_BLOCK_\d+__/g, (match) => {
const index = parseInt(match.match(/\d+/)?.[0] || "0");
return codeBlocks[index] || match;
});
}
return p;
});
}
// Lazy-loaded paragraph component
function MarkdownParagraph({
content,
onLoad,
}: {
content: string;
onLoad?: () => void;
}) {
const [isLoaded, setIsLoaded] = useState(false);
const placeholderRef = useRef<HTMLDivElement>(null);
const [isVisible, setIsVisible] = useState(false);
useEffect(() => {
let observer: IntersectionObserver;
if (placeholderRef.current) {
observer = new IntersectionObserver(
(entries) => {
if (entries[0].isIntersecting) {
setIsVisible(true);
}
},
{ threshold: 0.1, rootMargin: "200px 0px" },
);
observer.observe(placeholderRef.current);
}
return () => observer?.disconnect();
}, []);
useEffect(() => {
if (isVisible && !isLoaded) {
setIsLoaded(true);
onLoad?.();
}
}, [isVisible, isLoaded, onLoad]);
// Generate preview content
const previewContent = useMemo(() => {
if (content.startsWith("```")) {
return "```" + (content.split("\n")[0] || "").slice(3) + "...```";
}
return content.length > 60 ? content.slice(0, 60) + "..." : content;
}, [content]);
return (
<div className="markdown-paragraph" ref={placeholderRef}>
{!isLoaded ? (
<div className="markdown-paragraph-placeholder">{previewContent}</div>
) : (
<_MarkDownContent content={content} />
)}
</div>
);
}
// Memoized paragraph component to prevent unnecessary re-renders
const MemoizedMarkdownParagraph = React.memo(
({ content }: { content: string }) => {
return <_MarkDownContent content={content} />;
},
(prevProps, nextProps) => prevProps.content === nextProps.content,
);
MemoizedMarkdownParagraph.displayName = "MemoizedMarkdownParagraph";
// Specialized component for streaming content
function StreamingMarkdownContent({ content }: { content: string }) {
const paragraphs = useMemo(
() => splitContentIntoParagraphs(content),
[content],
);
const lastParagraphRef = useRef<HTMLDivElement>(null);
return (
<div className="markdown-streaming-content">
{paragraphs.map((paragraph, index) => (
<div
key={`p-${index}-${paragraph.substring(0, 20)}`}
className="markdown-paragraph markdown-streaming-paragraph"
ref={index === paragraphs.length - 1 ? lastParagraphRef : null}
>
<MemoizedMarkdownParagraph content={paragraph} />
</div>
))}
</div>
);
}
function _MarkDownContent(props: { content: string }) {
const escapedContent = useMemo(() => {
return tryWrapHtmlCode(escapeBrackets(props.content));
@@ -456,27 +326,9 @@ export function Markdown(
fontFamily?: string;
parentRef?: RefObject<HTMLDivElement>;
defaultShow?: boolean;
immediatelyRender?: boolean;
streaming?: boolean; // Whether this is a streaming response
} & React.DOMAttributes<HTMLDivElement>,
) {
const mdRef = useRef<HTMLDivElement>(null);
const paragraphs = useMemo(
() => splitContentIntoParagraphs(props.content),
[props.content],
);
const [loadedCount, setLoadedCount] = useState(0);
// Determine rendering strategy based on props
const shouldAsyncRender =
!props.immediatelyRender && !props.streaming && paragraphs.length > 1;
useEffect(() => {
// Immediately render all paragraphs if specified
if (props.immediatelyRender) {
setLoadedCount(paragraphs.length);
}
}, [props.immediatelyRender, paragraphs.length]);
return (
<div
@@ -492,24 +344,6 @@ export function Markdown(
>
{props.loading ? (
<LoadingIcon />
) : props.streaming ? (
// Use specialized component for streaming content
<StreamingMarkdownContent content={props.content} />
) : shouldAsyncRender ? (
<div className="markdown-content">
{paragraphs.map((paragraph, index) => (
<MarkdownParagraph
key={index}
content={paragraph}
onLoad={() => setLoadedCount((prev) => prev + 1)}
/>
))}
{loadedCount < paragraphs.length && loadedCount > 0 && (
<div className="markdown-paragraph-loading">
<LoadingIcon />
</div>
)}
</div>
) : (
<MarkdownContent content={props.content} />
)}
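
After the removals above, `Markdown` no longer splits content into lazily rendered paragraphs or special-cases streaming; it either shows the loading indicator or renders the whole message through the memoized `MarkdownContent` kept in the final branch. A stripped-down sketch of the remaining render path (wrapper props such as refs, font settings, and event handlers are omitted):

```tsx
// Stripped-down, illustrative sketch of the Markdown render path after this change.
// LoadingIcon and MarkdownContent are the components already present in this file.
function MarkdownSketch(props: { content: string; loading?: boolean }) {
  return (
    <div className="markdown-body">
      {props.loading ? (
        <LoadingIcon />
      ) : (
        <MarkdownContent content={props.content} />
      )}
    </div>
  );
}
```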

View File

@@ -417,6 +417,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4-turbo": "2023-12",
"gpt-4-turbo-2024-04-09": "2023-12",
"gpt-4-turbo-preview": "2023-12",
"gpt-4.1": "2024-06",
"gpt-4.1-2025-04-14": "2024-06",
"gpt-4.1-mini": "2024-06",
"gpt-4.1-mini-2025-04-14": "2024-06",
"gpt-4.1-nano": "2024-06",
"gpt-4.1-nano-2025-04-14": "2024-06",
"gpt-4.5-preview": "2023-10",
"gpt-4.5-preview-2025-02-27": "2023-10",
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
@@ -458,6 +466,7 @@ export const DEFAULT_TTS_VOICES = [
export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/gpt-4\.1/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
@@ -469,6 +478,8 @@ export const VISION_MODEL_REGEXES = [
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
/vl/i,
/o3/,
/o4-mini/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -485,6 +496,14 @@ const openaiModels = [
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
@@ -499,6 +518,8 @@ const openaiModels = [
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
];
const googleModels = [
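
The constant additions above extend `KnowledgeCutOffDate` (a plain model-name-to-cutoff map) and the OpenAI model list, and add regex patterns so the gpt-4.1 family, o3, and o4-mini match the vision-model checks. A hedged sketch of how the regex lists are typically consulted elsewhere in the app (the helper name is illustrative; the actual utility is not shown in this diff):

```ts
// Illustrative helper: a model is treated as vision-capable when it matches
// any inclusion regex and none of the exclusion regexes. With the patterns
// added above, names like "o3", "o4-mini", and "gpt-4.1-mini" now match.
function matchesVisionModel(model: string): boolean {
  if (EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))) return false;
  return VISION_MODEL_REGEXES.some((re) => re.test(model));
}
```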

View File

@@ -99,7 +99,6 @@
font-size: 14px;
line-height: 1.5;
word-wrap: break-word;
margin-bottom: 0;
}
.light {
@@ -359,14 +358,8 @@
.markdown-body kbd {
display: inline-block;
padding: 3px 5px;
font:
11px ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font: 11px ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
line-height: 10px;
color: var(--color-fg-default);
vertical-align: middle;
@@ -455,28 +448,16 @@
.markdown-body tt,
.markdown-body code,
.markdown-body samp {
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-size: 12px;
}
.markdown-body pre {
margin-top: 0;
margin-bottom: 0;
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-size: 12px;
word-wrap: normal;
}
@@ -1149,87 +1130,3 @@
#dmermaid {
display: none;
}
.markdown-content {
width: 100%;
}
.markdown-paragraph {
transition: opacity 0.3s ease;
margin-bottom: 0.5em;
&.markdown-paragraph-visible {
opacity: 1;
}
&.markdown-paragraph-hidden {
opacity: 0.7;
}
}
.markdown-paragraph-placeholder {
padding: 8px;
color: var(--color-fg-subtle);
background-color: var(--color-canvas-subtle);
border-radius: 6px;
border-left: 3px solid var(--color-border-muted);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
font-family: var(--font-family-sans);
font-size: 14px;
min-height: 1.2em;
}
.markdown-paragraph-loading {
height: 20px;
background-color: var(--color-canvas-subtle);
border-radius: 6px;
margin-bottom: 8px;
position: relative;
overflow: hidden;
&::after {
content: "";
position: absolute;
top: 0;
left: 0;
width: 30%;
height: 100%;
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.1),
transparent
);
animation: shimmer 1.5s infinite;
}
}
@keyframes shimmer {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(200%);
}
}
.markdown-streaming-content {
width: 100%;
}
.markdown-streaming-paragraph {
opacity: 1;
animation: fadeIn 0.3s ease-in-out;
margin-bottom: 0.5em;
}
@keyframes fadeIn {
from {
opacity: 0.5;
}
to {
opacity: 1;
}
}