Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-08 11:06:37 +08:00)

Compare commits: 5862caff27...2551f7a1b0 (14 commits)
Commits:

- 2551f7a1b0
- 673f907ea4
- fb3af2a08f
- eb193ac0ff
- c30ddfbb07
- a2f0149786
- 03d36f96ed
- 705dffc664
- 02f7e6de98
- 843dc52efa
- c261ebc82c
- f7c747c65f
- e2429d444b
- c15dbf5296
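Taken together, these commits route ByteDance `bot-`-prefixed bot models to the Volcengine Ark bots endpoint, prepend a "Formatting re-enabled" `developer` message to o1/o3 requests (alongside `max_completion_tokens`), record and display configured model display names in the chat UI, prune deprecated experimental Gemini models, and add the Grok 3 family plus a Gemini 2.5 Pro preview. The combined diff follows, grouped by file.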
README.md:

```diff
@@ -22,12 +22,12 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
 
-[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.club) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
 
 
 [saas-url]: https://nextchat.club?utm_source=readme
 [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
-[web-url]: https://app.nextchat.dev/
+[web-url]: https://app.nextchat.club/
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
 [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
 [Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
```
app/client/platforms/bytedance.ts:

```diff
@@ -117,7 +117,7 @@ export class DoubaoApi implements LLMApi {
     options.onController?.(controller);
 
     try {
-      const chatPath = this.path(ByteDance.ChatPath);
+      const chatPath = this.path(ByteDance.ChatPath(modelConfig.model));
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
```
app/client/platforms/openai.ts:

```diff
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
@@ -238,8 +238,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
     if (isO1OrO3) {
+      // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+      // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });
+
+      // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
 
```
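After this change, a request to an o1/o3 reasoning model carries a `developer` message at the front of `messages` and budgets output via `max_completion_tokens` instead of `max_tokens`. A minimal sketch of the resulting payload shape (model name and values here are illustrative, not taken from the diff):

```ts
// Illustrative payload for an o1/o3 request after this change.
const requestPayload = {
  model: "o3-mini", // hypothetical model name
  stream: false,
  messages: [
    // injected by the unshift() above so the model emits markdown again
    { role: "developer", content: "Formatting re-enabled" },
    { role: "user", content: "Explain quicksort." },
  ],
  // reasoning models budget output with max_completion_tokens, not max_tokens
  max_completion_tokens: 4000,
};
```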
app/components/chat.tsx:

```diff
@@ -1868,7 +1868,7 @@ function _Chat() {
             </div>
             {!isUser && (
               <div className={styles["chat-model-name"]}>
-                {message.model}
+                {message.modelDisplayName ?? message.model}
               </div>
             )}
 
```
app/components/emoji.tsx:

```diff
@@ -82,7 +82,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGrok;
   } else if (modelName.startsWith("hunyuan")) {
     LlmIcon = BotIconHunyuan;
-  } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
+  } else if (
+    modelName.startsWith("doubao") ||
+    modelName.startsWith("ep-") ||
+    modelName.startsWith("bot-")
+  ) {
     LlmIcon = BotIconDoubao;
   } else if (
     modelName.includes("glm") ||
```
app/constant.ts:

```diff
@@ -216,7 +216,13 @@ export const Baidu = {
 
 export const ByteDance = {
   ExampleEndpoint: "https://ark.cn-beijing.volces.com/api/",
-  ChatPath: "api/v3/chat/completions",
+  ChatPath: (modelName: string) => {
+    if (modelName.startsWith("bot-")) {
+      return "api/v3/bots/chat/completions";
+    } else {
+      return "api/v3/chat/completions";
+    }
+  },
 };
 
 export const Alibaba = {
```
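With `ChatPath` now a function of the model name, `bot-`-prefixed Ark bot models are routed to the bots endpoint while everything else keeps the plain chat-completions path. A quick illustration (the model IDs are hypothetical):

```ts
// Hypothetical Ark model IDs, for illustration only.
ByteDance.ChatPath("bot-20250101-abcdef"); // "api/v3/bots/chat/completions"
ByteDance.ChatPath("doubao-1-5-pro-32k");  // "api/v3/chat/completions"
ByteDance.ChatPath("ep-20250101-xyz");     // "api/v3/chat/completions"
```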
app/constant.ts (continued):

```diff
@@ -523,20 +529,15 @@ const openaiModels = [
 ];
 
 const googleModels = [
-  "gemini-1.0-pro", // Deprecated on 2/15/2025
   "gemini-1.5-pro-latest",
   "gemini-1.5-pro",
   "gemini-1.5-pro-002",
-  "gemini-1.5-pro-exp-0827",
   "gemini-1.5-flash-latest",
   "gemini-1.5-flash-8b-latest",
   "gemini-1.5-flash",
   "gemini-1.5-flash-8b",
   "gemini-1.5-flash-002",
-  "gemini-1.5-flash-exp-0827",
   "learnlm-1.5-pro-experimental",
-  "gemini-exp-1114",
-  "gemini-exp-1121",
   "gemini-exp-1206",
   "gemini-2.0-flash",
   "gemini-2.0-flash-exp",
@@ -546,6 +547,7 @@ const googleModels = [
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
+  "gemini-2.5-pro-preview-06-05",
 ];
 
 const anthropicModels = [
@@ -632,6 +634,18 @@ const xAIModes = [
   "grok-2-vision-1212",
   "grok-2-vision",
   "grok-2-vision-latest",
+  "grok-3-mini-fast-beta",
+  "grok-3-mini-fast",
+  "grok-3-mini-fast-latest",
+  "grok-3-mini-beta",
+  "grok-3-mini",
+  "grok-3-mini-latest",
+  "grok-3-fast-beta",
+  "grok-3-fast",
+  "grok-3-fast-latest",
+  "grok-3-beta",
+  "grok-3",
+  "grok-3-latest",
 ];
 
 const chatglmModels = [
```
app/store/chat.ts:

```diff
@@ -60,6 +60,7 @@ export type ChatMessage = RequestMessage & {
   isError?: boolean;
   id: string;
   model?: ModelType;
+  modelDisplayName?: string;
   tools?: ChatMessageTool[];
   audio_url?: string;
   isMcpResponse?: boolean;
@@ -151,6 +152,24 @@ function getSummarizeModel(
   return [currentModel, providerName];
 }
 
+function getModelDisplayName(
+  model: ModelType,
+  providerName: ServiceProvider,
+): string | undefined {
+  const configStore = useAppConfig.getState();
+  const accessStore = useAccessStore.getState();
+  const allModel = collectModelsWithDefaultModel(
+    configStore.models,
+    [configStore.customModels, accessStore.customModels].join(","),
+    accessStore.defaultModel,
+  );
+
+  const matchedModel = allModel.find(
+    (m) => m.name === model && m.provider?.providerName === providerName,
+  );
+  return matchedModel ? matchedModel.displayName : undefined;
+}
+
 function countMessages(msgs: ChatMessage[]) {
   return msgs.reduce(
     (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
```
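The display name is resolved once, when the assistant message is created (see the next hunk), and stored on the message itself; the UI then falls back to the raw model id when nothing matched. A minimal standalone sketch of that intent, with hypothetical data in place of the real store wiring:

```ts
type ModelRecord = {
  name: string;
  displayName?: string;
  provider?: { providerName: string };
};

// Hypothetical collected model list; in the real code this comes from
// collectModelsWithDefaultModel over the config and access stores.
const allModel: ModelRecord[] = [
  {
    name: "ep-20250101-abcdef",
    displayName: "Doubao Pro 32k",
    provider: { providerName: "ByteDance" },
  },
  { name: "gpt-4o", provider: { providerName: "OpenAI" } },
];

function displayNameFor(model: string, providerName: string): string | undefined {
  const matched = allModel.find(
    (m) => m.name === model && m.provider?.providerName === providerName,
  );
  return matched?.displayName;
}

// Mirrors the UI fallback {message.modelDisplayName ?? message.model}:
console.log(displayNameFor("ep-20250101-abcdef", "ByteDance") ?? "ep-20250101-abcdef"); // "Doubao Pro 32k"
console.log(displayNameFor("gpt-4o", "OpenAI") ?? "gpt-4o"); // "gpt-4o"
```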
app/store/chat.ts (continued):

```diff
@@ -437,6 +456,10 @@ export const useChatStore = createPersistStore(
       role: "assistant",
       streaming: true,
       model: modelConfig.model,
+      modelDisplayName: getModelDisplayName(
+        modelConfig.model,
+        modelConfig.providerName,
+      ),
     });
 
     // get recent messages
```
app/utils.ts:

```diff
@@ -304,7 +304,9 @@ export function getTimeoutMSByModel(model: string) {
     model.startsWith("o1") ||
     model.startsWith("o3") ||
     model.includes("deepseek-r") ||
-    model.includes("-thinking")
+    model.includes("-thinking") ||
+    model.startsWith("ep-") ||
+    model.startsWith("bot-")
   )
     return REQUEST_TIMEOUT_MS_FOR_THINKING;
   return REQUEST_TIMEOUT_MS;
```
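With this change, Ark endpoint (`ep-`) and bot (`bot-`) models also get the longer "thinking" timeout rather than the default one. Illustrative calls (the model IDs are hypothetical; the two timeout constants are defined in app/constant.ts):

```ts
// Hypothetical model IDs; "ep-"/"bot-" prefixes now select the longer timeout.
getTimeoutMSByModel("bot-20250101-abcdef"); // REQUEST_TIMEOUT_MS_FOR_THINKING
getTimeoutMSByModel("ep-20250101-xyz");     // REQUEST_TIMEOUT_MS_FOR_THINKING
getTimeoutMSByModel("gpt-4o-mini");         // REQUEST_TIMEOUT_MS
```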