Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-14 13:03:49 +08:00)
Compare commits: feat/markd...ac7b720b5b (20 commits)
Commits:
ac7b720b5b
d02f9b0dd4
f2a5af7556
ff196f22c2
106db97f8c
e30d90714b
2329d59c83
b5ee4c1fcf
6d69494e08
2509495cdc
d65aca6d13
2f5184c5b4
20df2eed07
fd998de148
fd2e69d1c7
e8dcede878
3b23f5f8ab
75cdd15bc2
af1dfd2a6c
6aecdd80e9
.gitignore (vendored, 1 line changed)
@@ -49,3 +49,4 @@ masks.json
 
 # mcp config
 app/mcp/mcp_config.json
+Dockerfile.local
app/client/platforms/anthropic.ts

@@ -71,8 +71,6 @@ const ClaudeMapper = {
   system: "user",
 } as const;
 
-const keys = ["claude-2, claude-instant-1"];
-
 export class ClaudeApi implements LLMApi {
   speech(options: SpeechOptions): Promise<ArrayBuffer> {
     throw new Error("Method not implemented.");
app/client/platforms/google.ts

@@ -197,8 +197,6 @@ export class GeminiProApi implements LLMApi {
       signal: controller.signal,
       headers: getHeaders(),
     };
 
-    const isThinking = options.config.model.includes("-thinking");
-
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
app/client/platforms/openai.ts

@@ -67,6 +67,8 @@ export interface RequestPayload {
   top_p: number;
   max_tokens?: number;
   max_completion_tokens?: number;
+  reasoning_effort?: string;
+  // O3 only
 }
 
 export interface DalleRequestPayload {
@@ -196,9 +198,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 =
-      options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+    const isO1 = options.config.model.startsWith("o1");
+    const isO3 = options.config.model.startsWith("o3");
+    const isO1OrO3 = isO1 || isO3;
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -242,9 +244,18 @@ export class ChatGPTApi implements LLMApi {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }
 
+      if (isO3) {
+        requestPayload["reasoning_effort"] = "high";
+        // make o3-mini defaults to high reasoning effort
+      }
+
       // add max_tokens to vision model
       if (visionModel) {
-        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        if (isO1) {
+          requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+        } else {
+          requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+        }
       }
     }
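Read together with the RequestPayload change above, the payload shaping is easiest to see as a sketch of the extra fields an o3-family request now carries (field values assume the default modelConfig with max_tokens set to 4000; this object is an illustration, not a line from the diff):

// Sketch only: what the branches above produce for an o3 model.
const o3Extras: Partial<RequestPayload> = {
  max_completion_tokens: 4000, // o1/o3 use max_completion_tokens rather than max_tokens
  reasoning_effort: "high", // set for every o3 model by this change
};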
@@ -286,6 +297,11 @@ export class ChatGPTApi implements LLMApi {
         isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
       );
     }
+    // make a fetch request
+    const requestTimeoutId = setTimeout(
+      () => controller.abort(),
+      getTimeoutMSByModel(options.config.model),
+    );
     if (shouldStream) {
       let index = -1;
       const [tools, funcs] = usePluginStore
@@ -393,12 +409,6 @@ export class ChatGPTApi implements LLMApi {
         headers: getHeaders(),
       };
 
-      // make a fetch request
-      const requestTimeoutId = setTimeout(
-        () => controller.abort(),
-        getTimeoutMSByModel(options.config.model),
-      );
-
       const res = await fetch(chatPath, chatPayload);
       clearTimeout(requestTimeoutId);
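The net effect of the last two hunks, as a minimal sketch using the names from the diff: the abort timer is armed once, before the stream/non-stream branch, so streamed requests fall under the same timeout instead of only the plain fetch path.

const requestTimeoutId = setTimeout(
  () => controller.abort(),
  getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
  // streamed response handling (timer cleanup assumed to happen in the stream helpers)
} else {
  const res = await fetch(chatPath, chatPayload);
  clearTimeout(requestTimeoutId);
}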
app/components/chat.tsx

@@ -18,6 +18,7 @@ import ReturnIcon from "../icons/return.svg";
 import CopyIcon from "../icons/copy.svg";
 import SpeakIcon from "../icons/speak.svg";
 import SpeakStopIcon from "../icons/speak-stop.svg";
+import LoadingIcon from "../icons/three-dots.svg";
 import LoadingButtonIcon from "../icons/loading.svg";
 import PromptIcon from "../icons/prompt.svg";
 import MaskIcon from "../icons/mask.svg";
@@ -78,6 +79,8 @@ import {
 
 import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
 
+import dynamic from "next/dynamic";
+
 import { ChatControllerPool } from "../client/controller";
 import { DalleQuality, DalleStyle, ModelSize } from "../typing";
 import { Prompt, usePromptStore } from "../store/prompt";
@@ -122,15 +125,14 @@ import { getModelProvider } from "../utils/model";
 import { RealtimeChat } from "@/app/components/realtime-chat";
 import clsx from "clsx";
 import { getAvailableClientsCount, isMcpEnabled } from "../mcp/actions";
-import { Markdown } from "./markdown";
 
 const localStorage = safeLocalStorage();
 
 const ttsPlayer = createTTSPlayer();
 
-// const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
-//   loading: () => <LoadingIcon />,
-// });
+const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
+  loading: () => <LoadingIcon />,
+});
 
 const MCPAction = () => {
   const navigate = useNavigate();
@@ -1982,8 +1984,6 @@ function _Chat() {
                   fontFamily={fontFamily}
                   parentRef={scrollRef}
                   defaultShow={i >= messages.length - 6}
-                  immediatelyRender={i >= messages.length - 3}
-                  streaming={message.streaming}
                 />
                 {getMessageImages(message).length == 1 && (
                   <img
app/components/markdown.tsx

@@ -267,136 +267,6 @@ function tryWrapHtmlCode(text: string) {
   );
 }
 
-// Split content into paragraphs while preserving code blocks
-function splitContentIntoParagraphs(content: string) {
-  // Check for unclosed code blocks
-  const codeBlockStartCount = (content.match(/```/g) || []).length;
-  let processedContent = content;
-
-  // Add closing tag if there's an odd number of code block markers
-  if (codeBlockStartCount % 2 !== 0) {
-    processedContent = content + "\n```";
-  }
-
-  // Extract code blocks
-  const codeBlockRegex = /```[\s\S]*?```/g;
-  const codeBlocks: string[] = [];
-  let codeBlockCounter = 0;
-
-  // Replace code blocks with placeholders
-  const contentWithPlaceholders = processedContent.replace(
-    codeBlockRegex,
-    (match) => {
-      codeBlocks.push(match);
-      const placeholder = `__CODE_BLOCK_${codeBlockCounter++}__`;
-      return placeholder;
-    },
-  );
-
-  // Split by double newlines
-  const paragraphs = contentWithPlaceholders
-    .split(/\n\n+/)
-    .filter((p) => p.trim());
-
-  // Restore code blocks
-  return paragraphs.map((p) => {
-    if (p.match(/__CODE_BLOCK_\d+__/)) {
-      return p.replace(/__CODE_BLOCK_\d+__/g, (match) => {
-        const index = parseInt(match.match(/\d+/)?.[0] || "0");
-        return codeBlocks[index] || match;
-      });
-    }
-    return p;
-  });
-}
-
-// Lazy-loaded paragraph component
-function MarkdownParagraph({
-  content,
-  onLoad,
-}: {
-  content: string;
-  onLoad?: () => void;
-}) {
-  const [isLoaded, setIsLoaded] = useState(false);
-  const placeholderRef = useRef<HTMLDivElement>(null);
-  const [isVisible, setIsVisible] = useState(false);
-
-  useEffect(() => {
-    let observer: IntersectionObserver;
-    if (placeholderRef.current) {
-      observer = new IntersectionObserver(
-        (entries) => {
-          if (entries[0].isIntersecting) {
-            setIsVisible(true);
-          }
-        },
-        { threshold: 0.1, rootMargin: "200px 0px" },
-      );
-      observer.observe(placeholderRef.current);
-    }
-    return () => observer?.disconnect();
-  }, []);
-
-  useEffect(() => {
-    if (isVisible && !isLoaded) {
-      setIsLoaded(true);
-      onLoad?.();
-    }
-  }, [isVisible, isLoaded, onLoad]);
-
-  // Generate preview content
-  const previewContent = useMemo(() => {
-    if (content.startsWith("```")) {
-      return "```" + (content.split("\n")[0] || "").slice(3) + "...```";
-    }
-    return content.length > 60 ? content.slice(0, 60) + "..." : content;
-  }, [content]);
-
-  return (
-    <div className="markdown-paragraph" ref={placeholderRef}>
-      {!isLoaded ? (
-        <div className="markdown-paragraph-placeholder">{previewContent}</div>
-      ) : (
-        <_MarkDownContent content={content} />
-      )}
-    </div>
-  );
-}
-
-// Memoized paragraph component to prevent unnecessary re-renders
-const MemoizedMarkdownParagraph = React.memo(
-  ({ content }: { content: string }) => {
-    return <_MarkDownContent content={content} />;
-  },
-  (prevProps, nextProps) => prevProps.content === nextProps.content,
-);
-
-MemoizedMarkdownParagraph.displayName = "MemoizedMarkdownParagraph";
-
-// Specialized component for streaming content
-function StreamingMarkdownContent({ content }: { content: string }) {
-  const paragraphs = useMemo(
-    () => splitContentIntoParagraphs(content),
-    [content],
-  );
-  const lastParagraphRef = useRef<HTMLDivElement>(null);
-
-  return (
-    <div className="markdown-streaming-content">
-      {paragraphs.map((paragraph, index) => (
-        <div
-          key={`p-${index}-${paragraph.substring(0, 20)}`}
-          className="markdown-paragraph markdown-streaming-paragraph"
-          ref={index === paragraphs.length - 1 ? lastParagraphRef : null}
-        >
-          <MemoizedMarkdownParagraph content={paragraph} />
-        </div>
-      ))}
-    </div>
-  );
-}
-
 function _MarkDownContent(props: { content: string }) {
   const escapedContent = useMemo(() => {
     return tryWrapHtmlCode(escapeBrackets(props.content));
@@ -456,27 +326,9 @@ export function Markdown(
     fontFamily?: string;
     parentRef?: RefObject<HTMLDivElement>;
     defaultShow?: boolean;
     immediatelyRender?: boolean;
-    streaming?: boolean; // Whether this is a streaming response
   } & React.DOMAttributes<HTMLDivElement>,
 ) {
   const mdRef = useRef<HTMLDivElement>(null);
-  const paragraphs = useMemo(
-    () => splitContentIntoParagraphs(props.content),
-    [props.content],
-  );
-  const [loadedCount, setLoadedCount] = useState(0);
-
-  // Determine rendering strategy based on props
-  const shouldAsyncRender =
-    !props.immediatelyRender && !props.streaming && paragraphs.length > 1;
-
-  useEffect(() => {
-    // Immediately render all paragraphs if specified
-    if (props.immediatelyRender) {
-      setLoadedCount(paragraphs.length);
-    }
-  }, [props.immediatelyRender, paragraphs.length]);
-
   return (
     <div
@@ -492,24 +344,6 @@ export function Markdown(
     >
       {props.loading ? (
         <LoadingIcon />
-      ) : props.streaming ? (
-        // Use specialized component for streaming content
-        <StreamingMarkdownContent content={props.content} />
-      ) : shouldAsyncRender ? (
-        <div className="markdown-content">
-          {paragraphs.map((paragraph, index) => (
-            <MarkdownParagraph
-              key={index}
-              content={paragraph}
-              onLoad={() => setLoadedCount((prev) => prev + 1)}
-            />
-          ))}
-          {loadedCount < paragraphs.length && loadedCount > 0 && (
-            <div className="markdown-paragraph-loading">
-              <LoadingIcon />
-            </div>
-          )}
-        </div>
       ) : (
         <MarkdownContent content={props.content} />
       )}
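For reference, a usage sketch of the splitContentIntoParagraphs helper removed above; fenced code survives the double-newline split because fences are swapped for placeholders before splitting, then restored afterwards:

// Sketch: a message mixing prose and a fenced block with a blank line inside it.
const sample = "Intro.\n\n```js\nconst a = 1;\n\nconst b = 2;\n```\n\nOutro.";
splitContentIntoParagraphs(sample);
// => ["Intro.", "```js\nconst a = 1;\n\nconst b = 2;\n```", "Outro."]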
app/constant.ts

@@ -408,35 +408,20 @@ You are an AI assistant with access to system tools. Your role is to help users
 
 `;
 
-export const SUMMARIZE_MODEL = "gpt-4o-mini";
-export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
+export const SUMMARIZE_MODEL = "gpt-4.1-mini";
+export const GEMINI_SUMMARIZE_MODEL = "gemini-2.0-flash";
 export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";
 
 export const KnowledgeCutOffDate: Record<string, string> = {
-  default: "2021-09",
-  "gpt-4-turbo": "2023-12",
-  "gpt-4-turbo-2024-04-09": "2023-12",
-  "gpt-4-turbo-preview": "2023-12",
-  "gpt-4o": "2023-10",
-  "gpt-4o-2024-05-13": "2023-10",
-  "gpt-4o-2024-08-06": "2023-10",
-  "gpt-4o-2024-11-20": "2023-10",
-  "chatgpt-4o-latest": "2023-10",
-  "gpt-4o-mini": "2023-10",
-  "gpt-4o-mini-2024-07-18": "2023-10",
-  "gpt-4-vision-preview": "2023-04",
-  "o1-mini-2024-09-12": "2023-10",
-  "o1-mini": "2023-10",
-  "o1-preview-2024-09-12": "2023-10",
-  "o1-preview": "2023-10",
-  "o1-2024-12-17": "2023-10",
-  o1: "2023-10",
-  "o3-mini-2025-01-31": "2023-10",
-  "o3-mini": "2023-10",
+  default: "2023-10",
+  // After improvements,
+  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
   "gemini-pro": "2023-12",
   "gemini-pro-vision": "2023-12",
+  "gemini-2.5-pro-exp-03-25": "2025-01",
+  "gemini-2.0-flash": "2024-08",
+  "claude-3-7-sonnet-latest": "2024-10",
+  "claude-3-5-haiku-latest": "2024-10",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-mini": "2024-06",
   "deepseek-chat": "2024-07",
   "deepseek-coder": "2024-07",
 };
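The slimmed-down table leans on the default entry; a sketch of the lookup pattern the app uses when filling its system-prompt template (fallback to default for models without an explicit date):

// Sketch: resolve a cutoff date for the active model, defaulting when unknown.
const cutoff =
  KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;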
@@ -457,11 +442,12 @@ export const DEFAULT_TTS_VOICES = [
 
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
   /gemini-2\.0/,
   /gemini-2\.5-pro/,
   /learnlm/,
   /qwen-vl/,
   /qwen2-vl/,
@@ -469,78 +455,23 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
   /vl/i,
   /o1/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
 
-const openaiModels = [
-  // As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
-  // as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
-  "gpt-3.5-turbo",
-  "gpt-3.5-turbo-1106",
-  "gpt-3.5-turbo-0125",
-  "gpt-4",
-  "gpt-4-0613",
-  "gpt-4-32k",
-  "gpt-4-32k-0613",
-  "gpt-4-turbo",
-  "gpt-4-turbo-preview",
-  "gpt-4o",
-  "gpt-4o-2024-05-13",
-  "gpt-4o-2024-08-06",
-  "gpt-4o-2024-11-20",
-  "chatgpt-4o-latest",
-  "gpt-4o-mini",
-  "gpt-4o-mini-2024-07-18",
-  "gpt-4-vision-preview",
-  "gpt-4-turbo-2024-04-09",
-  "gpt-4-1106-preview",
-  "dall-e-3",
-  "o1-mini",
-  "o1-preview",
-  "o3-mini",
-];
+const openaiModels = ["dall-e-3", "o1", "o3-mini", "gpt-4.1", "gpt-4.1-mini"];
 
 const googleModels = [
   "gemini-1.0-pro", // Deprecated on 2/15/2025
   "gemini-1.5-pro-latest",
   "gemini-1.5-pro",
   "gemini-1.5-pro-002",
   "gemini-1.5-pro-exp-0827",
   "gemini-1.5-flash-latest",
   "gemini-1.5-flash-8b-latest",
   "gemini-1.5-flash",
   "gemini-1.5-flash-8b",
   "gemini-1.5-flash-002",
   "gemini-1.5-flash-exp-0827",
   "learnlm-1.5-pro-experimental",
   "gemini-exp-1114",
   "gemini-exp-1121",
   "gemini-exp-1206",
   "gemini-2.0-flash",
   "gemini-2.0-flash-exp",
   "gemini-2.0-flash-lite-preview-02-05",
   "gemini-2.0-flash-thinking-exp",
   "gemini-2.0-flash-thinking-exp-1219",
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
   "gemini-2.0-flash-lite",
   "gemini-2.5-pro-exp-03-25",
 ];
 
 const anthropicModels = [
   "claude-instant-1.2",
   "claude-2.0",
   "claude-2.1",
   "claude-3-sonnet-20240229",
   "claude-3-opus-20240229",
   "claude-3-opus-latest",
   "claude-3-haiku-20240307",
   "claude-3-5-haiku-20241022",
   "claude-3-5-haiku-latest",
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
   "claude-3-7-sonnet-20250219",
   "claude-3-7-sonnet-latest",
 ];
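A sketch of how the two regex lists above combine (mirroring the isVisionModel check in app/utils.ts: an exclude match wins over any include match):

// Sketch only; the real helper also consults user-configured vision models.
function isVisionModelSketch(model: string) {
  return (
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}

isVisionModelSketch("gpt-4.1"); // true, via /gpt-4\.1/
isVisionModelSketch("claude-3-5-haiku-20241022"); // false, explicitly excluded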
app/store/config.ts

@@ -66,14 +66,14 @@ export const DEFAULT_CONFIG = {
   modelConfig: {
     model: "gpt-4o-mini" as ModelType,
     providerName: "OpenAI" as ServiceProvider,
-    temperature: 0.5,
+    temperature: 0.2,
     top_p: 1,
     max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 4,
-    compressMessageLengthThreshold: 1000,
+    historyMessageCount: 20,
+    compressMessageLengthThreshold: 5000,
     compressModel: "",
     compressProviderName: "",
     enableInjectSystemPrompts: true,
app/styles/markdown.scss

@@ -99,7 +99,6 @@
   font-size: 14px;
   line-height: 1.5;
   word-wrap: break-word;
-  margin-bottom: 0;
 }
 
 .light {
@@ -359,14 +358,8 @@
 .markdown-body kbd {
   display: inline-block;
   padding: 3px 5px;
-  font:
-    11px ui-monospace,
-    SFMono-Regular,
-    SF Mono,
-    Menlo,
-    Consolas,
-    Liberation Mono,
-    monospace;
+  font: 11px ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
+    Liberation Mono, monospace;
   line-height: 10px;
   color: var(--color-fg-default);
   vertical-align: middle;
@@ -455,28 +448,16 @@
 .markdown-body tt,
 .markdown-body code,
 .markdown-body samp {
-  font-family:
-    ui-monospace,
-    SFMono-Regular,
-    SF Mono,
-    Menlo,
-    Consolas,
-    Liberation Mono,
-    monospace;
+  font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
+    Liberation Mono, monospace;
   font-size: 12px;
 }
 
 .markdown-body pre {
   margin-top: 0;
   margin-bottom: 0;
-  font-family:
-    ui-monospace,
-    SFMono-Regular,
-    SF Mono,
-    Menlo,
-    Consolas,
-    Liberation Mono,
-    monospace;
+  font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
+    Liberation Mono, monospace;
   font-size: 12px;
   word-wrap: normal;
 }
@@ -1149,87 +1130,3 @@
 #dmermaid {
   display: none;
 }
-
-.markdown-content {
-  width: 100%;
-}
-
-.markdown-paragraph {
-  transition: opacity 0.3s ease;
-  margin-bottom: 0.5em;
-
-  &.markdown-paragraph-visible {
-    opacity: 1;
-  }
-
-  &.markdown-paragraph-hidden {
-    opacity: 0.7;
-  }
-}
-
-.markdown-paragraph-placeholder {
-  padding: 8px;
-  color: var(--color-fg-subtle);
-  background-color: var(--color-canvas-subtle);
-  border-radius: 6px;
-  border-left: 3px solid var(--color-border-muted);
-  white-space: nowrap;
-  overflow: hidden;
-  text-overflow: ellipsis;
-  font-family: var(--font-family-sans);
-  font-size: 14px;
-  min-height: 1.2em;
-}
-
-.markdown-paragraph-loading {
-  height: 20px;
-  background-color: var(--color-canvas-subtle);
-  border-radius: 6px;
-  margin-bottom: 8px;
-  position: relative;
-  overflow: hidden;
-
-  &::after {
-    content: "";
-    position: absolute;
-    top: 0;
-    left: 0;
-    width: 30%;
-    height: 100%;
-    background: linear-gradient(
-      90deg,
-      transparent,
-      rgba(255, 255, 255, 0.1),
-      transparent
-    );
-    animation: shimmer 1.5s infinite;
-  }
-}
-
-@keyframes shimmer {
-  0% {
-    transform: translateX(-100%);
-  }
-  100% {
-    transform: translateX(200%);
-  }
-}
-
-.markdown-streaming-content {
-  width: 100%;
-}
-
-.markdown-streaming-paragraph {
-  opacity: 1;
-  animation: fadeIn 0.3s ease-in-out;
-  margin-bottom: 0.5em;
-}
-
-@keyframes fadeIn {
-  from {
-    opacity: 0.5;
-  }
-  to {
-    opacity: 1;
-  }
-}
app/utils.ts (13 changed lines)
@@ -304,9 +304,18 @@ export function getTimeoutMSByModel(model: string) {
     model.startsWith("o1") ||
     model.startsWith("o3") ||
     model.includes("deepseek-r") ||
-    model.includes("-thinking")
-  )
+    model.includes("-thinking") ||
+    model.includes("pro")
+  ) {
+    console.log(
+      "thinking model is " +
+        model +
+        " timeout is " +
+        REQUEST_TIMEOUT_MS_FOR_THINKING,
+    );
     return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  }
+  console.log("normal model is " + model + " timeout is " + REQUEST_TIMEOUT_MS);
   return REQUEST_TIMEOUT_MS;
 }
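A usage sketch of the updated helper (constants as imported from app/constant.ts; note the new "pro" substring branch also catches Gemini Pro model ids):

getTimeoutMSByModel("o3-mini"); // REQUEST_TIMEOUT_MS_FOR_THINKING
getTimeoutMSByModel("gemini-1.5-pro"); // REQUEST_TIMEOUT_MS_FOR_THINKING, via the "pro" branch
getTimeoutMSByModel("gpt-4.1-mini"); // REQUEST_TIMEOUT_MS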
package-lock.json (generated, new file, 17486 lines; diff suppressed because it is too large)
test/model-available.test.ts

@@ -3,7 +3,7 @@ import { isModelNotavailableInServer } from "../app/utils/model";
 describe("isModelNotavailableInServer", () => {
   test("test model will return false, which means the model is available", () => {
     const customModels = "";
-    const modelName = "gpt-4";
+    const modelName = "gpt-4.1";
     const providerNames = "OpenAI";
     const result = isModelNotavailableInServer(
       customModels,
test/vision-model-checker.test.ts

@@ -15,10 +15,11 @@ describe("isVisionModel", () => {
 
   test("should identify vision models using regex patterns", () => {
     const visionModels = [
       "gpt-4-vision",
+      "gpt-4.1",
       "claude-3-opus",
       "gemini-1.5-pro",
       "gemini-2.0",
       "gemini-2.5-pro",
       "gemini-exp-vision",
       "learnlm-vision",
       "qwen-vl-max",